root / lib / cmdlib.py @ 34fbc862
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable=W0201,C0302

# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions

# C0302: since we have waaaay too many lines in this module

import os
import os.path
import time
import re
import platform
import logging
import copy
import OpenSSL
import socket
import tempfile
import shutil
import itertools
import operator

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf
from ganeti import uidpool
from ganeti import compat
from ganeti import masterd
from ganeti import netutils
from ganeti import query
from ganeti import qlang
from ganeti import opcodes
from ganeti import ht

import ganeti.masterd.instance # pylint: disable=W0611


class ResultWithJobs:
  """Data container for LU results with jobs.

  Instances of this class returned from L{LogicalUnit.Exec} will be recognized
  by L{mcpu.Processor._ProcessResult}. The latter will then submit the jobs
  contained in the C{jobs} attribute and include the job IDs in the opcode
  result.

  """
  def __init__(self, jobs, **kwargs):
    """Initializes this class.

    Additional return values can be specified as keyword arguments.

    @type jobs: list of lists of L{opcodes.OpCode}
    @param jobs: A list of lists of opcode objects

    """
    self.jobs = jobs
    self.other = kwargs
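
# Illustrative sketch (not from the original module): an LU's Exec() can hand
# follow-up work back to the master daemon by returning a ResultWithJobs, e.g.
#
#   def Exec(self, feedback_fn):
#     jobs = [[opcodes.OpClusterVerifyGroup(group_name=g)] for g in groups]
#     return ResultWithJobs(jobs, summary="submitted per-group verification")
#
# mcpu.Processor._ProcessResult then submits each inner list as one job and
# adds the resulting job IDs to the opcode result; "groups" and "summary" are
# hypothetical names used only for this example.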


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - implement BuildHooksNodes
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.glm = context.glm
    # readability alias
    self.owned_locks = context.glm.list_owned
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # logging
    self.Log = processor.Log # pylint: disable=C0103
    self.LogWarning = processor.LogWarning # pylint: disable=C0103
    self.LogInfo = processor.LogInfo # pylint: disable=C0103
    self.LogStep = processor.LogStep # pylint: disable=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    # Tasklets
    self.tasklets = None

    # Validate opcode parameters and set defaults
    self.op.Validate(True)

    self.CheckArguments()

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.com'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()
    else:
      pass

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    @rtype: dict
    @return: Dictionary containing the environment that will be used for
      running the hooks for this LU. The keys of the dict must not be prefixed
      with "GANETI_"--that'll be added by the hooks runner. The hooks runner
      will extend the environment with additional variables. If no environment
      should be defined, an empty dictionary should be returned (not C{None}).
    @note: If the C{HPATH} attribute of the LU class is C{None}, this function
      will not be called.

    """
    raise NotImplementedError

  def BuildHooksNodes(self):
    """Build list of nodes to run LU's hooks.

    @rtype: tuple; (list, list)
    @return: Tuple containing a list of node names on which the hook
      should run before the execution and a list of node names on which the
      hook should run after the execution. No nodes should be returned as an
      empty list (and not None).
    @note: If the C{HPATH} attribute of the LU class is C{None}, this function
      will not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # API must be kept, thus we ignore the "unused argument" and "could
    # be a function" warnings
    # pylint: disable=W0613,R0201
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
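
    # Illustrative sketch (not part of the original code): a typical instance
    # LU would use the helper above from its ExpandNames, e.g.
    #
    #   def ExpandNames(self):
    #     self._ExpandAndLockInstance()
    #     self.needed_locks[locking.LEVEL_NODE] = []
    #     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    #
    # so that DeclareLocks can later call self._LockInstancesNodes().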

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
    for _, instance in self.cfg.GetMultiInstanceInfo(locked_i):
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit): # pylint: disable=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Empty BuildHooksEnv for NoHooksLU.

    This just raises an error.

    """
    raise AssertionError("BuildHooksEnv called for NoHooksLUs")

  def BuildHooksNodes(self):
    """Empty BuildHooksNodes for NoHooksLU.

    """
    raise AssertionError("BuildHooksNodes called for NoHooksLU")


class Tasklet:
  """Tasklet base class.

  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
  tasklets know nothing about locks.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec

  """
  def __init__(self, lu):
    self.lu = lu

    # Shortcuts
    self.cfg = lu.cfg
    self.rpc = lu.rpc

  def CheckPrereq(self):
    """Check prerequisites for this tasklet.

    This method should check whether the prerequisites for the execution of
    this tasklet are fulfilled. It can do internode communication, but it
    should be idempotent - no cluster or system changes are allowed.

    The method should raise errors.OpPrereqError in case something is not
    fulfilled. Its return value is ignored.

    This method should also update all parameters to their canonical form if it
    hasn't been done before.

    """
    pass

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in code, or
    expected.

    """
    raise NotImplementedError


class _QueryBase:
  """Base for query utility classes.

  """
  #: Attribute holding field definitions
  FIELDS = None

  def __init__(self, filter_, fields, use_locking):
    """Initializes this class.

    """
    self.use_locking = use_locking

    self.query = query.Query(self.FIELDS, fields, filter_=filter_,
                             namefield="name")
    self.requested_data = self.query.RequestedData()
    self.names = self.query.RequestedNames()

    # Sort only if no names were requested
    self.sort_by_name = not self.names

    self.do_locking = None
    self.wanted = None

  def _GetNames(self, lu, all_names, lock_level):
    """Helper function to determine names asked for in the query.

    """
    if self.do_locking:
      names = lu.owned_locks(lock_level)
    else:
      names = all_names

    if self.wanted == locking.ALL_SET:
      assert not self.names
      # caller didn't specify names, so ordering is not important
      return utils.NiceSort(names)

    # caller specified names and we must keep the same order
    assert self.names
    assert not self.do_locking or lu.glm.is_owned(lock_level)

    missing = set(self.wanted).difference(names)
    if missing:
      raise errors.OpExecError("Some items were removed before retrieving"
                               " their data: %s" % missing)

    # Return expanded names
    return self.wanted

  def ExpandNames(self, lu):
    """Expand names for this query.

    See L{LogicalUnit.ExpandNames}.

    """
    raise NotImplementedError()

  def DeclareLocks(self, lu, level):
    """Declare locks for this query.

    See L{LogicalUnit.DeclareLocks}.

    """
    raise NotImplementedError()

  def _GetQueryData(self, lu):
    """Collects all data for this query.

    @return: Query data object

    """
    raise NotImplementedError()

  def NewStyleQuery(self, lu):
    """Collect data and execute query.

    """
    return query.GetQueryResponse(self.query, self._GetQueryData(lu),
                                  sort_by_name=self.sort_by_name)

  def OldStyleQuery(self, lu):
    """Collect data and execute query.

    """
    return self.query.OldStyleQuery(self._GetQueryData(lu),
                                    sort_by_name=self.sort_by_name)


def _ShareAll():
  """Returns a dict declaring all lock levels shared.

  """
  return dict.fromkeys(locking.LEVELS, 1)


def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups):
  """Checks if the owned node groups are still correct for an instance.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type instance_name: string
  @param instance_name: Instance name
  @type owned_groups: set or frozenset
  @param owned_groups: List of currently owned node groups

  """
  inst_groups = cfg.GetInstanceNodeGroups(instance_name)

  if not owned_groups.issuperset(inst_groups):
    raise errors.OpPrereqError("Instance %s's node groups changed since"
                               " locks were acquired, current groups are"
                               " '%s', owning groups '%s'; retry the"
                               " operation" %
                               (instance_name,
                                utils.CommaJoin(inst_groups),
                                utils.CommaJoin(owned_groups)),
                               errors.ECODE_STATE)

  return inst_groups


def _CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
  """Checks if the instances in a node group are still correct.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type group_uuid: string
  @param group_uuid: Node group UUID
  @type owned_instances: set or frozenset
  @param owned_instances: List of currently owned instances

  """
  wanted_instances = cfg.GetNodeGroupInstances(group_uuid)
  if owned_instances != wanted_instances:
    raise errors.OpPrereqError("Instances in node group '%s' changed since"
                               " locks were acquired, wanted '%s', have '%s';"
                               " retry the operation" %
                               (group_uuid,
                                utils.CommaJoin(wanted_instances),
                                utils.CommaJoin(owned_instances)),
                               errors.ECODE_STATE)

  return wanted_instances


def _SupportsOob(cfg, node):
  """Tells if node supports OOB.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @return: The OOB script if supported or an empty string otherwise

  """
  return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if nodes:
    return [_ExpandNodeName(lu.cfg, name) for name in nodes]

  return utils.NiceSort(lu.cfg.GetNodeList())


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if instances:
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _GetUpdatedParams(old_params, update_dict,
                      use_default=True, use_none=False):
  """Return the new version of a parameter dictionary.

  @type old_params: dict
  @param old_params: old parameters
  @type update_dict: dict
  @param update_dict: dict containing new parameter values, or
      constants.VALUE_DEFAULT to reset the parameter to its default
      value
  @type use_default: boolean
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
      values as 'to be deleted' values
  @type use_none: boolean
  @param use_none: whether to recognise C{None} values as 'to be
      deleted' values
  @rtype: dict
  @return: the new parameter dictionary

  """
  params_copy = copy.deepcopy(old_params)
  for key, val in update_dict.iteritems():
    if ((use_default and val == constants.VALUE_DEFAULT) or
        (use_none and val is None)):
      try:
        del params_copy[key]
      except KeyError:
        pass
    else:
      params_copy[key] = val
  return params_copy
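
# Illustrative sketch (not from the original module) of how the helper above
# behaves: existing keys are overwritten, while constants.VALUE_DEFAULT (or
# None with use_none=True) drops a key so the cluster default applies again:
#
#   _GetUpdatedParams({"memory": 512, "vcpus": 2},
#                     {"memory": constants.VALUE_DEFAULT, "vcpus": 4})
#   # -> {"vcpus": 4}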


def _ReleaseLocks(lu, level, names=None, keep=None):
  """Releases locks owned by an LU.

  @type lu: L{LogicalUnit}
  @param level: Lock level
  @type names: list or None
  @param names: Names of locks to release
  @type keep: list or None
  @param keep: Names of locks to retain

  """
  assert not (keep is not None and names is not None), \
         "Only one of the 'names' and the 'keep' parameters can be given"

  if names is not None:
    should_release = names.__contains__
  elif keep:
    should_release = lambda name: name not in keep
  else:
    should_release = None

  if should_release:
    retain = []
    release = []

    # Determine which locks to release
    for name in lu.owned_locks(level):
      if should_release(name):
        release.append(name)
      else:
        retain.append(name)

    assert len(lu.owned_locks(level)) == (len(retain) + len(release))

    # Release just some locks
    lu.glm.release(level, names=release)

    assert frozenset(lu.owned_locks(level)) == frozenset(retain)
  else:
    # Release everything
    lu.glm.release(level)

    assert not lu.glm.is_owned(level), "No locks should be owned"
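
# Illustrative sketch (not from the original module): callers either name the
# locks to drop or the ones to keep, e.g.
#
#   _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.node_name])
#
# releases every node lock owned by the LU except the one for
# self.op.node_name.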


def _MapInstanceDisksToNodes(instances):
  """Creates a map from (node, volume) to instance name.

  @type instances: list of L{objects.Instance}
  @rtype: dict; tuple of (node name, volume name) as key, instance name as value

  """
  return dict(((node, vol), inst.name)
              for inst in instances
              for (node, vols) in inst.MapLVsByNode().items()
              for vol in vols)
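
# Illustrative sketch (not from the original module) of the mapping built
# above; node and volume names are made up for the example:
#
#   {("node1.example.com", "xenvg/disk0"): "inst1.example.com",
#    ("node2.example.com", "xenvg/disk0"): "inst2.example.com"}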


def _RunPostHook(lu, node_name):
  """Runs the post-hook for an opcode on a single node.

  """
  hm = lu.proc.hmclass(lu.rpc.call_hooks_runner, lu)
  try:
    hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
  except:
    # pylint: disable=W0702
    lu.LogWarning("Errors occurred running hooks on %s" % node_name)


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta), errors.ECODE_INVAL)


def _CheckGlobalHvParams(params):
  """Validates that given hypervisor params are not global ones.

  This will ensure that instances don't get customised versions of
  global params.

  """
  used_globals = constants.HVC_GLOBALS.intersection(params)
  if used_globals:
    msg = ("The following hypervisor parameters are global and cannot"
           " be customized at instance level, please modify them at"
           " cluster level: %s" % utils.CommaJoin(used_globals))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _CheckNodeOnline(lu, node, msg=None):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the node is offline

  """
  if msg is None:
    msg = "Can't use offline node"
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_STATE)


def _CheckNodeVmCapable(lu, node):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  if not lu.cfg.GetNodeInfo(node).vm_capable:
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
                               errors.ECODE_STATE)


def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node is not supporting the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)


def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
  """Ensure that a node has the given secondary ip.

  @type lu: L{LogicalUnit}
  @param lu: the LU on behalf of which we make the check
  @type node: string
  @param node: the node to check
  @type secondary_ip: string
  @param secondary_ip: the ip to check
  @type prereq: boolean
  @param prereq: whether to throw a prerequisite or an execute error
  @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
  @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False

  """
  result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
  result.Raise("Failure checking secondary ip on node %s" % node,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
  if not result.payload:
    msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
           " please fix and re-run this command" % secondary_ip)
    if prereq:
      raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
    else:
      raise errors.OpExecError(msg)


def _GetClusterDomainSecret():
  """Reads the cluster domain secret.

  """
  return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
                               strict=True)


def _CheckInstanceDown(lu, instance, reason):
  """Ensure that an instance is not running."""
  if instance.admin_up:
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
                               (instance.name, reason), errors.ECODE_STATE)

  pnode = instance.primary_node
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
              prereq=True, ecode=errors.ECODE_ENVIRON)

  if instance.name in ins_l.payload:
    raise errors.OpPrereqError("Instance %s is running, %s" %
                               (instance.name, reason), errors.ECODE_STATE)


def _ExpandItemName(fn, name, kind):
  """Expand an item name.

  @param fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the resolved (full) name
  @raise errors.OpPrereqError: if the item is not found

  """
  full_name = fn(name)
  if full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return full_name


def _ExpandNodeName(cfg, name):
  """Wrapper over L{_ExpandItemName} for nodes."""
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")


def _ExpandInstanceName(cfg, name):
  """Wrapper over L{_ExpandItemName} for instance."""
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name, tags):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @type tags: list
  @param tags: list of instance tags as strings
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  if not tags:
    tags = []

  env["INSTANCE_TAGS"] = " ".join(tags)

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env
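
# Illustrative sketch (not part of the original code): for a one-NIC, one-disk
# instance the dict built above contains entries such as OP_TARGET,
# INSTANCE_NAME, INSTANCE_PRIMARY, INSTANCE_NIC_COUNT, INSTANCE_NIC0_MAC,
# INSTANCE_DISK_COUNT, INSTANCE_DISK0_SIZE and the INSTANCE_BE_*/INSTANCE_HV_*
# parameters; the hooks runner prefixes each key with "GANETI_" before
# exporting it to the hook scripts.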


def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUInstanceQueryData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  cluster = lu.cfg.GetClusterInfo()
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = cluster.SimpleFillNIC(nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    "name": instance.name,
    "primary_node": instance.primary_node,
    "secondary_nodes": instance.secondary_nodes,
    "os_type": instance.os,
    "status": instance.admin_up,
    "memory": bep[constants.BE_MEMORY],
    "vcpus": bep[constants.BE_VCPUS],
    "nics": _NICListToTuple(lu, instance.nics),
    "disk_template": instance.disk_template,
    "disks": [(disk.size, disk.mode) for disk in instance.disks],
    "bep": bep,
    "hvp": hvp,
    "hypervisor_name": instance.hypervisor,
    "tags": instance.tags,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args) # pylint: disable=W0142


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max with one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should


def _CheckNicsBridgesExist(lu, target_nics, target_node):
  """Check that the bridges needed by a list of nics exist.

  """
  cluster = lu.cfg.GetClusterInfo()
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """

  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]

  return []


def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
  """Check the sanity of iallocator and node arguments and use the
  cluster-wide iallocator if appropriate.

  Check that at most one of (iallocator, node) is specified. If none is
  specified, then the LU's opcode's iallocator slot is filled with the
  cluster-wide default iallocator.

  @type iallocator_slot: string
  @param iallocator_slot: the name of the opcode iallocator slot
  @type node_slot: string
  @param node_slot: the name of the opcode target node slot

  """
  node = getattr(lu.op, node_slot, None)
  iallocator = getattr(lu.op, iallocator_slot, None)

  if node is not None and iallocator is not None:
    raise errors.OpPrereqError("Do not specify both, iallocator and node",
                               errors.ECODE_INVAL)
  elif node is None and iallocator is None:
    default_iallocator = lu.cfg.GetDefaultIAllocator()
    if default_iallocator:
      setattr(lu.op, iallocator_slot, default_iallocator)
    else:
      raise errors.OpPrereqError("No iallocator or node given and no"
                                 " cluster-wide default iallocator found;"
                                 " please specify either an iallocator or a"
                                 " node, or set a cluster-wide default"
                                 " iallocator")


def _GetDefaultIAllocator(cfg, iallocator):
  """Decides on which iallocator to use.

  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration object
  @type iallocator: string or None
  @param iallocator: Iallocator specified in opcode
  @rtype: string
  @return: Iallocator name

  """
  if not iallocator:
    # Use default iallocator
    iallocator = cfg.GetDefaultIAllocator()

  if not iallocator:
    raise errors.OpPrereqError("No iallocator was specified, neither in the"
                               " opcode nor as a cluster-wide default",
                               errors.ECODE_INVAL)

  return iallocator


class LUClusterPostInit(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([], [self.cfg.GetMasterNode()])

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True


class LUClusterDestroy(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([], [])

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1),
                                 errors.ECODE_INVAL)
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()

    # Run post hooks on master node before it's removed
    _RunPostHook(self, master)

    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    return master


def _VerifyCertificate(filename):
  """Verifies a certificate for L{LUClusterVerifyConfig}.

  @type filename: string
  @param filename: Path to PEM file

  """
  try:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           utils.ReadFile(filename))
  except Exception, err: # pylint: disable=W0703
    return (LUClusterVerifyConfig.ETYPE_ERROR,
            "Failed to load X509 certificate %s: %s" % (filename, err))

  (errcode, msg) = \
    utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
                                constants.SSL_CERT_EXPIRATION_ERROR)

  if msg:
    fnamemsg = "While verifying %s: %s" % (filename, msg)
  else:
    fnamemsg = None

  if errcode is None:
    return (None, fnamemsg)
  elif errcode == utils.CERT_WARNING:
    return (LUClusterVerifyConfig.ETYPE_WARNING, fnamemsg)
  elif errcode == utils.CERT_ERROR:
    return (LUClusterVerifyConfig.ETYPE_ERROR, fnamemsg)

  raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
1395

    
1396

    
1397
def _GetAllHypervisorParameters(cluster, instances):
1398
  """Compute the set of all hypervisor parameters.
1399

1400
  @type cluster: L{objects.Cluster}
1401
  @param cluster: the cluster object
1402
  @param instances: list of L{objects.Instance}
1403
  @param instances: additional instances from which to obtain parameters
1404
  @rtype: list of (origin, hypervisor, parameters)
1405
  @return: a list with all parameters found, indicating the hypervisor they
1406
       apply to, and the origin (can be "cluster", "os X", or "instance Y")
1407

1408
  """
1409
  hvp_data = []
1410

    
1411
  for hv_name in cluster.enabled_hypervisors:
1412
    hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
1413

    
1414
  for os_name, os_hvp in cluster.os_hvp.items():
1415
    for hv_name, hv_params in os_hvp.items():
1416
      if hv_params:
1417
        full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
1418
        hvp_data.append(("os %s" % os_name, hv_name, full_params))
1419

    
1420
  # TODO: collapse identical parameter values in a single one
1421
  for instance in instances:
1422
    if instance.hvparams:
1423
      hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
1424
                       cluster.FillHV(instance)))
1425

    
1426
  return hvp_data
1427

    
1428

    
1429
class _VerifyErrors(object):
1430
  """Mix-in for cluster/group verify LUs.
1431

1432
  It provides _Error and _ErrorIf, and updates the self.bad boolean. (Expects
1433
  self.op and self._feedback_fn to be available.)
1434

1435
  """
1436
  TCLUSTER = "cluster"
1437
  TNODE = "node"
1438
  TINSTANCE = "instance"
1439

    
1440
  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
1441
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
1442
  ECLUSTERFILECHECK = (TCLUSTER, "ECLUSTERFILECHECK")
1443
  ECLUSTERDANGLINGNODES = (TNODE, "ECLUSTERDANGLINGNODES")
1444
  ECLUSTERDANGLINGINST = (TNODE, "ECLUSTERDANGLINGINST")
1445
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
1446
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
1447
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
1448
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
1449
  EINSTANCEFAULTYDISK = (TINSTANCE, "EINSTANCEFAULTYDISK")
1450
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
1451
  EINSTANCESPLITGROUPS = (TINSTANCE, "EINSTANCESPLITGROUPS")
1452
  ENODEDRBD = (TNODE, "ENODEDRBD")
1453
  ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
1454
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
1455
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
1456
  ENODEHV = (TNODE, "ENODEHV")
1457
  ENODELVM = (TNODE, "ENODELVM")
1458
  ENODEN1 = (TNODE, "ENODEN1")
1459
  ENODENET = (TNODE, "ENODENET")
1460
  ENODEOS = (TNODE, "ENODEOS")
1461
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
1462
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
1463
  ENODERPC = (TNODE, "ENODERPC")
1464
  ENODESSH = (TNODE, "ENODESSH")
1465
  ENODEVERSION = (TNODE, "ENODEVERSION")
1466
  ENODESETUP = (TNODE, "ENODESETUP")
1467
  ENODETIME = (TNODE, "ENODETIME")
1468
  ENODEOOBPATH = (TNODE, "ENODEOOBPATH")
1469

    
1470
  ETYPE_FIELD = "code"
1471
  ETYPE_ERROR = "ERROR"
1472
  ETYPE_WARNING = "WARNING"
1473

    
1474
  def _Error(self, ecode, item, msg, *args, **kwargs):
1475
    """Format an error message.
1476

1477
    Based on the opcode's error_codes parameter, either format a
1478
    parseable error code, or a simpler error string.
1479

1480
    This must be called only from Exec and functions called from Exec.
1481

1482
    """
1483
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1484
    itype, etxt = ecode
1485
    # first complete the msg
1486
    if args:
1487
      msg = msg % args
1488
    # then format the whole message
1489
    if self.op.error_codes: # This is a mix-in. pylint: disable=E1101
1490
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1491
    else:
1492
      if item:
1493
        item = " " + item
1494
      else:
1495
        item = ""
1496
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1497
    # and finally report it via the feedback_fn
1498
    self._feedback_fn("  - %s" % msg) # Mix-in. pylint: disable=E1101
1499

    
1500
  def _ErrorIf(self, cond, *args, **kwargs):
1501
    """Log an error message if the passed condition is True.
1502

1503
    """
1504
    cond = (bool(cond)
1505
            or self.op.debug_simulate_errors) # pylint: disable=E1101
1506
    if cond:
1507
      self._Error(*args, **kwargs)
1508
    # do not mark the operation as failed for WARN cases only
1509
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1510
      self.bad = self.bad or cond
1511

    
1512

    
1513
class LUClusterVerify(NoHooksLU):
1514
  """Submits all jobs necessary to verify the cluster.
1515

1516
  """
1517
  REQ_BGL = False
1518

    
1519
  def ExpandNames(self):
1520
    self.needed_locks = {}
1521

    
1522
  def Exec(self, feedback_fn):
1523
    jobs = []
1524

    
1525
    if self.op.group_name:
1526
      groups = [self.op.group_name]
1527
      depends_fn = lambda: None
1528
    else:
1529
      groups = self.cfg.GetNodeGroupList()
1530

    
1531
      # Verify global configuration
1532
      jobs.append([opcodes.OpClusterVerifyConfig()])
1533

    
1534
      # Always depend on global verification
1535
      depends_fn = lambda: [(-len(jobs), [])]
1536

    
1537
    jobs.extend([opcodes.OpClusterVerifyGroup(group_name=group,
1538
                                              depends=depends_fn())]
1539
                for group in groups)
1540

    
1541
    # Fix up all parameters
1542
    for op in itertools.chain(*jobs): # pylint: disable=W0142
1543
      op.debug_simulate_errors = self.op.debug_simulate_errors
1544
      op.verbose = self.op.verbose
1545
      op.error_codes = self.op.error_codes
1546
      try:
1547
        op.skip_checks = self.op.skip_checks
1548
      except AttributeError:
1549
        assert not isinstance(op, opcodes.OpClusterVerifyGroup)
1550

    
1551
    return ResultWithJobs(jobs)
1552

    
1553

    
1554
class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
1555
  """Verifies the cluster config.
1556

1557
  """
1558
  REQ_BGL = True
1559

    
1560
  def _VerifyHVP(self, hvp_data):
1561
    """Verifies locally the syntax of the hypervisor parameters.
1562

1563
    """
1564
    for item, hv_name, hv_params in hvp_data:
1565
      msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
1566
             (item, hv_name))
1567
      try:
1568
        hv_class = hypervisor.GetHypervisor(hv_name)
1569
        utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1570
        hv_class.CheckParameterSyntax(hv_params)
1571
      except errors.GenericError, err:
1572
        self._ErrorIf(True, self.ECLUSTERCFG, None, msg % str(err))
1573

    
1574
  def ExpandNames(self):
1575
    # Information can be safely retrieved as the BGL is acquired in exclusive
1576
    # mode
1577
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1578
    self.all_group_info = self.cfg.GetAllNodeGroupsInfo()
1579
    self.all_node_info = self.cfg.GetAllNodesInfo()
1580
    self.all_inst_info = self.cfg.GetAllInstancesInfo()
1581
    self.needed_locks = {}
1582

    
1583
  def Exec(self, feedback_fn):
1584
    """Verify integrity of cluster, performing various test on nodes.
1585

1586
    """
1587
    self.bad = False
1588
    self._feedback_fn = feedback_fn
1589

    
1590
    feedback_fn("* Verifying cluster config")
1591

    
1592
    for msg in self.cfg.VerifyConfig():
1593
      self._ErrorIf(True, self.ECLUSTERCFG, None, msg)
1594

    
1595
    feedback_fn("* Verifying cluster certificate files")
1596

    
1597
    for cert_filename in constants.ALL_CERT_FILES:
1598
      (errcode, msg) = _VerifyCertificate(cert_filename)
1599
      self._ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
1600

    
1601
    feedback_fn("* Verifying hypervisor parameters")
1602

    
1603
    self._VerifyHVP(_GetAllHypervisorParameters(self.cfg.GetClusterInfo(),
1604
                                                self.all_inst_info.values()))
1605

    
1606
    feedback_fn("* Verifying all nodes belong to an existing group")
1607

    
1608
    # We do this verification here because, should this bogus circumstance
1609
    # occur, it would never be caught by VerifyGroup, which only acts on
1610
    # nodes/instances reachable from existing node groups.
1611

    
1612
    dangling_nodes = set(node.name for node in self.all_node_info.values()
1613
                         if node.group not in self.all_group_info)
1614

    
1615
    dangling_instances = {}
1616
    no_node_instances = []
1617

    
1618
    for inst in self.all_inst_info.values():
1619
      if inst.primary_node in dangling_nodes:
1620
        dangling_instances.setdefault(inst.primary_node, []).append(inst.name)
1621
      elif inst.primary_node not in self.all_node_info:
1622
        no_node_instances.append(inst.name)
1623

    
1624
    pretty_dangling = [
1625
        "%s (%s)" %
1626
        (node.name,
1627
         utils.CommaJoin(dangling_instances.get(node.name,
1628
                                                ["no instances"])))
1629
        for node in dangling_nodes]
1630

    
1631
    self._ErrorIf(bool(dangling_nodes), self.ECLUSTERDANGLINGNODES, None,
1632
                  "the following nodes (and their instances) belong to a non"
1633
                  " existing group: %s", utils.CommaJoin(pretty_dangling))
1634

    
1635
    self._ErrorIf(bool(no_node_instances), self.ECLUSTERDANGLINGINST, None,
1636
                  "the following instances have a non-existing primary-node:"
1637
                  " %s", utils.CommaJoin(no_node_instances))
1638

    
1639
    return not self.bad
1640

    
1641

    
1642
class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
1643
  """Verifies the status of a node group.
1644

1645
  """
1646
  HPATH = "cluster-verify"
1647
  HTYPE = constants.HTYPE_CLUSTER
1648
  REQ_BGL = False
1649

    
1650
  _HOOKS_INDENT_RE = re.compile("^", re.M)
1651

    
1652
  class NodeImage(object):
1653
    """A class representing the logical and physical status of a node.
1654

1655
    @type name: string
1656
    @ivar name: the node name to which this object refers
1657
    @ivar volumes: a structure as returned from
1658
        L{ganeti.backend.GetVolumeList} (runtime)
1659
    @ivar instances: a list of running instances (runtime)
1660
    @ivar pinst: list of configured primary instances (config)
1661
    @ivar sinst: list of configured secondary instances (config)
1662
    @ivar sbp: dictionary of {primary-node: list of instances} for all
1663
        instances for which this node is secondary (config)
1664
    @ivar mfree: free memory, as reported by hypervisor (runtime)
1665
    @ivar dfree: free disk, as reported by the node (runtime)
1666
    @ivar offline: the offline status (config)
1667
    @type rpc_fail: boolean
1668
    @ivar rpc_fail: whether the RPC verify call was successful (overall,
1669
        not whether the individual keys were correct) (runtime)
1670
    @type lvm_fail: boolean
1671
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
1672
    @type hyp_fail: boolean
1673
    @ivar hyp_fail: whether the RPC call didn't return the instance list
1674
    @type ghost: boolean
1675
    @ivar ghost: whether this is a known node or not (config)
1676
    @type os_fail: boolean
1677
    @ivar os_fail: whether the RPC call didn't return valid OS data
1678
    @type oslist: list
1679
    @ivar oslist: list of OSes as diagnosed by DiagnoseOS
1680
    @type vm_capable: boolean
1681
    @ivar vm_capable: whether the node can host instances
1682

1683
    """
1684
    def __init__(self, offline=False, name=None, vm_capable=True):
1685
      self.name = name
1686
      self.volumes = {}
1687
      self.instances = []
1688
      self.pinst = []
1689
      self.sinst = []
1690
      self.sbp = {}
1691
      self.mfree = 0
1692
      self.dfree = 0
1693
      self.offline = offline
1694
      self.vm_capable = vm_capable
1695
      self.rpc_fail = False
1696
      self.lvm_fail = False
1697
      self.hyp_fail = False
1698
      self.ghost = False
1699
      self.os_fail = False
1700
      self.oslist = {}
1701

    
1702
  def ExpandNames(self):
1703
    # This raises errors.OpPrereqError on its own:
1704
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
1705

    
1706
    # Get instances in node group; this is unsafe and needs verification later
1707
    inst_names = self.cfg.GetNodeGroupInstances(self.group_uuid)
1708

    
1709
    self.needed_locks = {
1710
      locking.LEVEL_INSTANCE: inst_names,
1711
      locking.LEVEL_NODEGROUP: [self.group_uuid],
1712
      locking.LEVEL_NODE: [],
1713
      }
1714

    
1715
    self.share_locks = _ShareAll()
1716

    
1717
  def DeclareLocks(self, level):
1718
    if level == locking.LEVEL_NODE:
1719
      # Get members of node group; this is unsafe and needs verification later
1720
      nodes = set(self.cfg.GetNodeGroup(self.group_uuid).members)
1721

    
1722
      all_inst_info = self.cfg.GetAllInstancesInfo()
1723

    
1724
      # In Exec(), we warn about mirrored instances that have primary and
1725
      # secondary living in separate node groups. To fully verify that
1726
      # volumes for these instances are healthy, we will need to do an
1727
      # extra call to their secondaries. We ensure here those nodes will
1728
      # be locked.
1729
      for inst in self.owned_locks(locking.LEVEL_INSTANCE):
1730
        # Important: access only the instances whose lock is owned
1731
        if all_inst_info[inst].disk_template in constants.DTS_INT_MIRROR:
1732
          nodes.update(all_inst_info[inst].secondary_nodes)
1733

    
1734
      self.needed_locks[locking.LEVEL_NODE] = nodes
1735

    
1736
  def CheckPrereq(self):
1737
    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
1738
    self.group_info = self.cfg.GetNodeGroup(self.group_uuid)
1739

    
1740
    group_nodes = set(self.group_info.members)
1741
    group_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
1742

    
1743
    unlocked_nodes = \
1744
        group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
1745

    
1746
    unlocked_instances = \
1747
        group_instances.difference(self.owned_locks(locking.LEVEL_INSTANCE))
1748

    
1749
    if unlocked_nodes:
1750
      raise errors.OpPrereqError("Missing lock for nodes: %s" %
1751
                                 utils.CommaJoin(unlocked_nodes))
1752

    
1753
    if unlocked_instances:
1754
      raise errors.OpPrereqError("Missing lock for instances: %s" %
1755
                                 utils.CommaJoin(unlocked_instances))
1756

    
1757
    self.all_node_info = self.cfg.GetAllNodesInfo()
1758
    self.all_inst_info = self.cfg.GetAllInstancesInfo()
1759

    
1760
    self.my_node_names = utils.NiceSort(group_nodes)
1761
    self.my_inst_names = utils.NiceSort(group_instances)
1762

    
1763
    self.my_node_info = dict((name, self.all_node_info[name])
1764
                             for name in self.my_node_names)
1765

    
1766
    self.my_inst_info = dict((name, self.all_inst_info[name])
1767
                             for name in self.my_inst_names)
1768

    
1769
    # We detect here the nodes that will need the extra RPC calls for verifying
1770
    # split LV volumes; they should be locked.
1771
    extra_lv_nodes = set()
1772

    
1773
    for inst in self.my_inst_info.values():
1774
      if inst.disk_template in constants.DTS_INT_MIRROR:
1775
        group = self.my_node_info[inst.primary_node].group
1776
        for nname in inst.secondary_nodes:
1777
          if self.all_node_info[nname].group != group:
1778
            extra_lv_nodes.add(nname)
1779

    
1780
    unlocked_lv_nodes = \
1781
        extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
1782

    
1783
    if unlocked_lv_nodes:
1784
      raise errors.OpPrereqError("these nodes could be locked: %s" %
1785
                                 utils.CommaJoin(unlocked_lv_nodes))
1786
    self.extra_lv_nodes = list(extra_lv_nodes)
1787

    
1788
  def _VerifyNode(self, ninfo, nresult):
1789
    """Perform some basic validation on data returned from a node.
1790

1791
      - check the result data structure is well formed and has all the
1792
        mandatory fields
1793
      - check ganeti version
1794

1795
    @type ninfo: L{objects.Node}
1796
    @param ninfo: the node to check
1797
    @param nresult: the results from the node
1798
    @rtype: boolean
1799
    @return: whether overall this call was successful (and we can expect
1800
         reasonable values in the response)
1801

1802
    """
1803
    node = ninfo.name
1804
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
1805

    
1806
    # main result, nresult should be a non-empty dict
1807
    test = not nresult or not isinstance(nresult, dict)
1808
    _ErrorIf(test, self.ENODERPC, node,
1809
                  "unable to verify node: no data returned")
1810
    if test:
1811
      return False
1812

    
1813
    # compares ganeti version
1814
    local_version = constants.PROTOCOL_VERSION
1815
    remote_version = nresult.get("version", None)
1816
    test = not (remote_version and
1817
                isinstance(remote_version, (list, tuple)) and
1818
                len(remote_version) == 2)
1819
    _ErrorIf(test, self.ENODERPC, node,
1820
             "connection to node returned invalid data")
1821
    if test:
1822
      return False
1823

    
1824
    test = local_version != remote_version[0]
1825
    _ErrorIf(test, self.ENODEVERSION, node,
1826
             "incompatible protocol versions: master %s,"
1827
             " node %s", local_version, remote_version[0])
1828
    if test:
1829
      return False
1830

    
1831
    # node seems compatible, we can actually try to look into its results
1832

    
1833
    # full package version
1834
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1835
                  self.ENODEVERSION, node,
1836
                  "software version mismatch: master %s, node %s",
1837
                  constants.RELEASE_VERSION, remote_version[1],
1838
                  code=self.ETYPE_WARNING)
1839

    
1840
    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1841
    if ninfo.vm_capable and isinstance(hyp_result, dict):
1842
      for hv_name, hv_result in hyp_result.iteritems():
1843
        test = hv_result is not None
1844
        _ErrorIf(test, self.ENODEHV, node,
1845
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1846

    
1847
    hvp_result = nresult.get(constants.NV_HVPARAMS, None)
1848
    if ninfo.vm_capable and isinstance(hvp_result, list):
1849
      for item, hv_name, hv_result in hvp_result:
1850
        _ErrorIf(True, self.ENODEHV, node,
1851
                 "hypervisor %s parameter verify failure (source %s): %s",
1852
                 hv_name, item, hv_result)
1853

    
1854
    test = nresult.get(constants.NV_NODESETUP,
1855
                       ["Missing NODESETUP results"])
1856
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1857
             "; ".join(test))
1858

    
1859
    return True
1860

    
1861
  def _VerifyNodeTime(self, ninfo, nresult,
1862
                      nvinfo_starttime, nvinfo_endtime):
1863
    """Check the node time.
1864

1865
    @type ninfo: L{objects.Node}
1866
    @param ninfo: the node to check
1867
    @param nresult: the remote results for the node
1868
    @param nvinfo_starttime: the start time of the RPC call
1869
    @param nvinfo_endtime: the end time of the RPC call
1870

1871
    """
1872
    node = ninfo.name
1873
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
1874

    
1875
    ntime = nresult.get(constants.NV_TIME, None)
1876
    try:
1877
      ntime_merged = utils.MergeTime(ntime)
1878
    except (ValueError, TypeError):
1879
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
1880
      return
1881

    
1882
    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
1883
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
1884
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
1885
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
1886
    else:
1887
      ntime_diff = None
1888

    
1889
    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
1890
             "Node time diverges by at least %s from master node time",
1891
             ntime_diff)
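    # Worked example (the exact skew constant is an assumption, e.g. 150s):
    # a node whose merged time lies 200s beyond nvinfo_endtime falls outside
    # the allowed window and is reported as diverging "by at least 200.0s",
    # while a 100s offset would stay within tolerance and pass silently.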
1892

    
1893
  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
1894
    """Check the node LVM results.
1895

1896
    @type ninfo: L{objects.Node}
1897
    @param ninfo: the node to check
1898
    @param nresult: the remote results for the node
1899
    @param vg_name: the configured VG name
1900

1901
    """
1902
    if vg_name is None:
1903
      return
1904

    
1905
    node = ninfo.name
1906
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
1907

    
1908
    # checks vg existence and size > 20G
1909
    vglist = nresult.get(constants.NV_VGLIST, None)
1910
    test = not vglist
1911
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1912
    if not test:
1913
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1914
                                            constants.MIN_VG_SIZE)
1915
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1916

    
1917
    # check pv names
1918
    pvlist = nresult.get(constants.NV_PVLIST, None)
1919
    test = pvlist is None
1920
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
1921
    if not test:
1922
      # check that ':' is not present in PV names, since it's a
1923
      # special character for lvcreate (denotes the range of PEs to
1924
      # use on the PV)
1925
      for _, pvname, owner_vg in pvlist:
1926
        test = ":" in pvname
1927
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
1928
                 " '%s' of VG '%s'", pvname, owner_vg)
1929

    
1930
  def _VerifyNodeBridges(self, ninfo, nresult, bridges):
1931
    """Check the node bridges.
1932

1933
    @type ninfo: L{objects.Node}
1934
    @param ninfo: the node to check
1935
    @param nresult: the remote results for the node
1936
    @param bridges: the expected list of bridges
1937

1938
    """
1939
    if not bridges:
1940
      return
1941

    
1942
    node = ninfo.name
1943
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
1944

    
1945
    missing = nresult.get(constants.NV_BRIDGES, None)
1946
    test = not isinstance(missing, list)
1947
    _ErrorIf(test, self.ENODENET, node,
1948
             "did not return valid bridge information")
1949
    if not test:
1950
      _ErrorIf(bool(missing), self.ENODENET, node, "missing bridges: %s" %
1951
               utils.CommaJoin(sorted(missing)))
1952

    
1953
  def _VerifyNodeNetwork(self, ninfo, nresult):
1954
    """Check the node network connectivity results.
1955

1956
    @type ninfo: L{objects.Node}
1957
    @param ninfo: the node to check
1958
    @param nresult: the remote results for the node
1959

1960
    """
1961
    node = ninfo.name
1962
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
1963

    
1964
    test = constants.NV_NODELIST not in nresult
1965
    _ErrorIf(test, self.ENODESSH, node,
1966
             "node hasn't returned node ssh connectivity data")
1967
    if not test:
1968
      if nresult[constants.NV_NODELIST]:
1969
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
1970
          _ErrorIf(True, self.ENODESSH, node,
1971
                   "ssh communication with node '%s': %s", a_node, a_msg)
1972

    
1973
    test = constants.NV_NODENETTEST not in nresult
1974
    _ErrorIf(test, self.ENODENET, node,
1975
             "node hasn't returned node tcp connectivity data")
1976
    if not test:
1977
      if nresult[constants.NV_NODENETTEST]:
1978
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
1979
        for anode in nlist:
1980
          _ErrorIf(True, self.ENODENET, node,
1981
                   "tcp communication with node '%s': %s",
1982
                   anode, nresult[constants.NV_NODENETTEST][anode])
1983

    
1984
    test = constants.NV_MASTERIP not in nresult
1985
    _ErrorIf(test, self.ENODENET, node,
1986
             "node hasn't returned node master IP reachability data")
1987
    if not test:
1988
      if not nresult[constants.NV_MASTERIP]:
1989
        if node == self.master_node:
1990
          msg = "the master node cannot reach the master IP (not configured?)"
1991
        else:
1992
          msg = "cannot reach the master IP"
1993
        _ErrorIf(True, self.ENODENET, node, msg)
1994

    
1995
  def _VerifyInstance(self, instance, instanceconfig, node_image,
1996
                      diskstatus):
1997
    """Verify an instance.
1998

1999
    This function checks to see if the required block devices are
2000
    available on the instance's node.
2001

2002
    """
2003
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
2004
    node_current = instanceconfig.primary_node
2005

    
2006
    node_vol_should = {}
2007
    instanceconfig.MapLVsByNode(node_vol_should)
2008

    
2009
    for node in node_vol_should:
2010
      n_img = node_image[node]
2011
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
2012
        # ignore missing volumes on offline or broken nodes
2013
        continue
2014
      for volume in node_vol_should[node]:
2015
        test = volume not in n_img.volumes
2016
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
2017
                 "volume %s missing on node %s", volume, node)
2018

    
2019
    if instanceconfig.admin_up:
2020
      pri_img = node_image[node_current]
2021
      test = instance not in pri_img.instances and not pri_img.offline
2022
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
2023
               "instance not running on its primary node %s",
2024
               node_current)
2025

    
2026
    diskdata = [(nname, success, status, idx)
2027
                for (nname, disks) in diskstatus.items()
2028
                for idx, (success, status) in enumerate(disks)]
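    # For example, a diskstatus of {"nodeA": [(True, st0), (False, "err")]}
    # flattens to [("nodeA", True, st0, 0), ("nodeA", False, "err", 1)]
    # ("nodeA" being a hypothetical node name).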
2029

    
2030
    for nname, success, bdev_status, idx in diskdata:
2031
      # the 'ghost node' construction in Exec() ensures that we have a
2032
      # node here
2033
      snode = node_image[nname]
2034
      bad_snode = snode.ghost or snode.offline
2035
      _ErrorIf(instanceconfig.admin_up and not success and not bad_snode,
2036
               self.EINSTANCEFAULTYDISK, instance,
2037
               "couldn't retrieve status for disk/%s on %s: %s",
2038
               idx, nname, bdev_status)
2039
      _ErrorIf((instanceconfig.admin_up and success and
2040
                bdev_status.ldisk_status == constants.LDS_FAULTY),
2041
               self.EINSTANCEFAULTYDISK, instance,
2042
               "disk/%s on %s is faulty", idx, nname)
2043

    
2044
  def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
2045
    """Verify if there are any unknown volumes in the cluster.
2046

2047
    The .os, .swap and backup volumes are ignored. All other volumes are
2048
    reported as unknown.
2049

2050
    @type reserved: L{ganeti.utils.FieldSet}
2051
    @param reserved: a FieldSet of reserved volume names
2052

2053
    """
2054
    for node, n_img in node_image.items():
2055
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
2056
        # skip non-healthy nodes
2057
        continue
2058
      for volume in n_img.volumes:
2059
        test = ((node not in node_vol_should or
2060
                volume not in node_vol_should[node]) and
2061
                not reserved.Matches(volume))
2062
        self._ErrorIf(test, self.ENODEORPHANLV, node,
2063
                      "volume %s is unknown", volume)
2064

    
2065
  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
2066
    """Verify N+1 Memory Resilience.
2067

2068
    Check that if one single node dies we can still start all the
2069
    instances it was primary for.
2070

2071
    """
2072
    cluster_info = self.cfg.GetClusterInfo()
2073
    for node, n_img in node_image.items():
2074
      # This code checks that every node which is now listed as
2075
      # secondary has enough memory to host all instances it is
2076
      # supposed to should a single other node in the cluster fail.
2077
      # FIXME: not ready for failover to an arbitrary node
2078
      # FIXME: does not support file-backed instances
2079
      # WARNING: we currently take into account down instances as well
2080
      # as up ones, considering that even if they're down someone
2081
      # might want to start them even in the event of a node failure.
2082
      if n_img.offline:
2083
        # we're skipping offline nodes from the N+1 warning, since
2084
        # most likely we don't have good memory information from them;
2085
        # we already list instances living on such nodes, and that's
2086
        # enough warning
2087
        continue
2088
      for prinode, instances in n_img.sbp.items():
2089
        needed_mem = 0
2090
        for instance in instances:
2091
          bep = cluster_info.FillBE(instance_cfg[instance])
2092
          if bep[constants.BE_AUTO_BALANCE]:
2093
            needed_mem += bep[constants.BE_MEMORY]
2094
        test = n_img.mfree < needed_mem
2095
        self._ErrorIf(test, self.ENODEN1, node,
2096
                      "not enough memory to accomodate instance failovers"
2097
                      " should node %s fail (%dMiB needed, %dMiB available)",
2098
                      prinode, needed_mem, n_img.mfree)
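        # Worked example (purely illustrative figures): if prinode currently
        # has two auto-balanced instances with a BE_MEMORY of 2048 each,
        # this node needs at least 4096 MiB of free memory to absorb a
        # failover of prinode.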
2099

    
2100
  @classmethod
2101
  def _VerifyFiles(cls, errorif, nodeinfo, master_node, all_nvinfo,
2102
                   (files_all, files_all_opt, files_mc, files_vm)):
2103
    """Verifies file checksums collected from all nodes.
2104

2105
    @param errorif: Callback for reporting errors
2106
    @param nodeinfo: List of L{objects.Node} objects
2107
    @param master_node: Name of master node
2108
    @param all_nvinfo: RPC results
2109

2110
    """
2111
    node_names = frozenset(node.name for node in nodeinfo if not node.offline)
2112

    
2113
    assert master_node in node_names
2114
    assert (len(files_all | files_all_opt | files_mc | files_vm) ==
2115
            sum(map(len, [files_all, files_all_opt, files_mc, files_vm]))), \
2116
           "Found file listed in more than one file list"
2117

    
2118
    # Define functions determining which nodes to consider for a file
2119
    file2nodefn = dict([(filename, fn)
2120
      for (files, fn) in [(files_all, None),
2121
                          (files_all_opt, None),
2122
                          (files_mc, lambda node: (node.master_candidate or
2123
                                                   node.name == master_node)),
2124
                          (files_vm, lambda node: node.vm_capable)]
2125
      for filename in files])
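    # The resulting map decides which nodes must hold a file: None means
    # "every online node", files_mc entries are checked only on master
    # candidates plus the master itself, and files_vm entries only on
    # vm_capable nodes.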
2126

    
2127
    fileinfo = dict((filename, {}) for filename in file2nodefn.keys())
2128

    
2129
    for node in nodeinfo:
2130
      if node.offline:
2131
        continue
2132

    
2133
      nresult = all_nvinfo[node.name]
2134

    
2135
      if nresult.fail_msg or not nresult.payload:
2136
        node_files = None
2137
      else:
2138
        node_files = nresult.payload.get(constants.NV_FILELIST, None)
2139

    
2140
      test = not (node_files and isinstance(node_files, dict))
2141
      errorif(test, cls.ENODEFILECHECK, node.name,
2142
              "Node did not return file checksum data")
2143
      if test:
2144
        continue
2145

    
2146
      for (filename, checksum) in node_files.items():
2147
        # Check if the file should be considered for a node
2148
        fn = file2nodefn[filename]
2149
        if fn is None or fn(node):
2150
          fileinfo[filename].setdefault(checksum, set()).add(node.name)
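    # fileinfo now maps each filename to {checksum: set of reporting nodes},
    # e.g. {"/etc/example.conf": {"ab12...": set(["node1"])}} (path, checksum
    # and node name are hypothetical).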
2151

    
2152
    for (filename, checksums) in fileinfo.items():
2153
      assert compat.all(len(i) > 10 for i in checksums), "Invalid checksum"
2154

    
2155
      # Nodes having the file
2156
      with_file = frozenset(node_name
2157
                            for nodes in fileinfo[filename].values()
2158
                            for node_name in nodes)
2159

    
2160
      # Nodes missing file
2161
      missing_file = node_names - with_file
2162

    
2163
      if filename in files_all_opt:
2164
        # All or no nodes
2165
        errorif(missing_file and missing_file != node_names,
2166
                cls.ECLUSTERFILECHECK, None,
2167
                "File %s is optional, but it must exist on all or no"
2168
                " nodes (not found on %s)",
2169
                filename, utils.CommaJoin(utils.NiceSort(missing_file)))
2170
      else:
2171
        errorif(missing_file, cls.ECLUSTERFILECHECK, None,
2172
                "File %s is missing from node(s) %s", filename,
2173
                utils.CommaJoin(utils.NiceSort(missing_file)))
2174

    
2175
      # See if there are multiple versions of the file
2176
      test = len(checksums) > 1
2177
      if test:
2178
        variants = ["variant %s on %s" %
2179
                    (idx + 1, utils.CommaJoin(utils.NiceSort(nodes)))
2180
                    for (idx, (checksum, nodes)) in
2181
                      enumerate(sorted(checksums.items()))]
2182
      else:
2183
        variants = []
2184

    
2185
      errorif(test, cls.ECLUSTERFILECHECK, None,
2186
              "File %s found with %s different checksums (%s)",
2187
              filename, len(checksums), "; ".join(variants))
2188

    
2189
  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
2190
                      drbd_map):
2191
    """Verifies and the node DRBD status.
2192

2193
    @type ninfo: L{objects.Node}
2194
    @param ninfo: the node to check
2195
    @param nresult: the remote results for the node
2196
    @param instanceinfo: the dict of instances
2197
    @param drbd_helper: the configured DRBD usermode helper
2198
    @param drbd_map: the DRBD map as returned by
2199
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}
2200

2201
    """
2202
    node = ninfo.name
2203
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
2204

    
2205
    if drbd_helper:
2206
      helper_result = nresult.get(constants.NV_DRBDHELPER, None)
2207
      test = (helper_result is None)
2208
      _ErrorIf(test, self.ENODEDRBDHELPER, node,
2209
               "no drbd usermode helper returned")
2210
      if helper_result:
2211
        status, payload = helper_result
2212
        test = not status
2213
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
2214
                 "drbd usermode helper check unsuccessful: %s", payload)
2215
        test = status and (payload != drbd_helper)
2216
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
2217
                 "wrong drbd usermode helper: %s", payload)
2218

    
2219
    # compute the DRBD minors
2220
    node_drbd = {}
2221
    for minor, instance in drbd_map[node].items():
2222
      test = instance not in instanceinfo
2223
      _ErrorIf(test, self.ECLUSTERCFG, None,
2224
               "ghost instance '%s' in temporary DRBD map", instance)
2225
        # ghost instance should not be running, but otherwise we
2226
        # don't give double warnings (both ghost instance and
2227
        # unallocated minor in use)
2228
      if test:
2229
        node_drbd[minor] = (instance, False)
2230
      else:
2231
        instance = instanceinfo[instance]
2232
        node_drbd[minor] = (instance.name, instance.admin_up)
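    # node_drbd now maps each minor to (instance name, should-be-running),
    # e.g. {0: ("inst1.example.com", True)} for a hypothetical instance.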
2233

    
2234
    # and now check them
2235
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
2236
    test = not isinstance(used_minors, (tuple, list))
2237
    _ErrorIf(test, self.ENODEDRBD, node,
2238
             "cannot parse drbd status file: %s", str(used_minors))
2239
    if test:
2240
      # we cannot check drbd status
2241
      return
2242

    
2243
    for minor, (iname, must_exist) in node_drbd.items():
2244
      test = minor not in used_minors and must_exist
2245
      _ErrorIf(test, self.ENODEDRBD, node,
2246
               "drbd minor %d of instance %s is not active", minor, iname)
2247
    for minor in used_minors:
2248
      test = minor not in node_drbd
2249
      _ErrorIf(test, self.ENODEDRBD, node,
2250
               "unallocated drbd minor %d is in use", minor)
2251

    
2252
  def _UpdateNodeOS(self, ninfo, nresult, nimg):
2253
    """Builds the node OS structures.
2254

2255
    @type ninfo: L{objects.Node}
2256
    @param ninfo: the node to check
2257
    @param nresult: the remote results for the node
2258
    @param nimg: the node image object
2259

2260
    """
2261
    node = ninfo.name
2262
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
2263

    
2264
    remote_os = nresult.get(constants.NV_OSLIST, None)
2265
    test = (not isinstance(remote_os, list) or
2266
            not compat.all(isinstance(v, list) and len(v) == 7
2267
                           for v in remote_os))
2268

    
2269
    _ErrorIf(test, self.ENODEOS, node,
2270
             "node hasn't returned valid OS data")
2271

    
2272
    nimg.os_fail = test
2273

    
2274
    if test:
2275
      return
2276

    
2277
    os_dict = {}
2278

    
2279
    for (name, os_path, status, diagnose,
2280
         variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:
2281

    
2282
      if name not in os_dict:
2283
        os_dict[name] = []
2284

    
2285
      # parameters is a list of lists instead of list of tuples due to
2286
      # JSON lacking a real tuple type, fix it:
2287
      parameters = [tuple(v) for v in parameters]
2288
      os_dict[name].append((os_path, status, diagnose,
2289
                            set(variants), set(parameters), set(api_ver)))
2290

    
2291
    nimg.oslist = os_dict
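    # nimg.oslist therefore maps every OS name to a list of
    # (path, status, diagnose, variants, parameters, api_versions) tuples,
    # one entry per location in which the OS was found on the node.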
2292

    
2293
  def _VerifyNodeOS(self, ninfo, nimg, base):
2294
    """Verifies the node OS list.
2295

2296
    @type ninfo: L{objects.Node}
2297
    @param ninfo: the node to check
2298
    @param nimg: the node image object
2299
    @param base: the 'template' node we match against (e.g. from the master)
2300

2301
    """
2302
    node = ninfo.name
2303
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
2304

    
2305
    assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
2306

    
2307
    beautify_params = lambda l: ["%s: %s" % (k, v) for (k, v) in l]
2308
    for os_name, os_data in nimg.oslist.items():
2309
      assert os_data, "Empty OS status for OS %s?!" % os_name
2310
      f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
2311
      _ErrorIf(not f_status, self.ENODEOS, node,
2312
               "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
2313
      _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
2314
               "OS '%s' has multiple entries (first one shadows the rest): %s",
2315
               os_name, utils.CommaJoin([v[0] for v in os_data]))
2316
      # comparisons with the 'base' image
2317
      test = os_name not in base.oslist
2318
      _ErrorIf(test, self.ENODEOS, node,
2319
               "Extra OS %s not present on reference node (%s)",
2320
               os_name, base.name)
2321
      if test:
2322
        continue
2323
      assert base.oslist[os_name], "Base node has empty OS status?"
2324
      _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
2325
      if not b_status:
2326
        # base OS is invalid, skipping
2327
        continue
2328
      for kind, a, b in [("API version", f_api, b_api),
2329
                         ("variants list", f_var, b_var),
2330
                         ("parameters", beautify_params(f_param),
2331
                          beautify_params(b_param))]:
2332
        _ErrorIf(a != b, self.ENODEOS, node,
2333
                 "OS %s for %s differs from reference node %s: [%s] vs. [%s]",
2334
                 kind, os_name, base.name,
2335
                 utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))
2336

    
2337
    # check any missing OSes
2338
    missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
2339
    _ErrorIf(missing, self.ENODEOS, node,
2340
             "OSes present on reference node %s but missing on this node: %s",
2341
             base.name, utils.CommaJoin(missing))
2342

    
2343
  def _VerifyOob(self, ninfo, nresult):
2344
    """Verifies out of band functionality of a node.
2345

2346
    @type ninfo: L{objects.Node}
2347
    @param ninfo: the node to check
2348
    @param nresult: the remote results for the node
2349

2350
    """
2351
    node = ninfo.name
2352
    # We just have to verify the paths on master and/or master candidates
2353
    # as the oob helper is invoked on the master
2354
    if ((ninfo.master_candidate or ninfo.master_capable) and
2355
        constants.NV_OOB_PATHS in nresult):
2356
      for path_result in nresult[constants.NV_OOB_PATHS]:
2357
        self._ErrorIf(path_result, self.ENODEOOBPATH, node, path_result)
2358

    
2359
  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
2360
    """Verifies and updates the node volume data.
2361

2362
    This function will update a L{NodeImage}'s internal structures
2363
    with data from the remote call.
2364

2365
    @type ninfo: L{objects.Node}
2366
    @param ninfo: the node to check
2367
    @param nresult: the remote results for the node
2368
    @param nimg: the node image object
2369
    @param vg_name: the configured VG name
2370

2371
    """
2372
    node = ninfo.name
2373
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
2374

    
2375
    nimg.lvm_fail = True
2376
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
2377
    if vg_name is None:
2378
      pass
2379
    elif isinstance(lvdata, basestring):
2380
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
2381
               utils.SafeEncode(lvdata))
2382
    elif not isinstance(lvdata, dict):
2383
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
2384
    else:
2385
      nimg.volumes = lvdata
2386
      nimg.lvm_fail = False
2387

    
2388
  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
2389
    """Verifies and updates the node instance list.
2390

2391
    If the listing was successful, then updates this node's instance
2392
    list. Otherwise, it marks the RPC call as failed for the instance
2393
    list key.
2394

2395
    @type ninfo: L{objects.Node}
2396
    @param ninfo: the node to check
2397
    @param nresult: the remote results for the node
2398
    @param nimg: the node image object
2399

2400
    """
2401
    idata = nresult.get(constants.NV_INSTANCELIST, None)
2402
    test = not isinstance(idata, list)
2403
    self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
2404
                  " (instancelist): %s", utils.SafeEncode(str(idata)))
2405
    if test:
2406
      nimg.hyp_fail = True
2407
    else:
2408
      nimg.instances = idata
2409

    
2410
  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
2411
    """Verifies and computes a node information map
2412

2413
    @type ninfo: L{objects.Node}
2414
    @param ninfo: the node to check
2415
    @param nresult: the remote results for the node
2416
    @param nimg: the node image object
2417
    @param vg_name: the configured VG name
2418

2419
    """
2420
    node = ninfo.name
2421
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
2422

    
2423
    # try to read free memory (from the hypervisor)
2424
    hv_info = nresult.get(constants.NV_HVINFO, None)
2425
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
2426
    _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
2427
    if not test:
2428
      try:
2429
        nimg.mfree = int(hv_info["memory_free"])
2430
      except (ValueError, TypeError):
2431
        _ErrorIf(True, self.ENODERPC, node,
2432
                 "node returned invalid nodeinfo, check hypervisor")
2433

    
2434
    # FIXME: devise a free space model for file based instances as well
2435
    if vg_name is not None:
2436
      test = (constants.NV_VGLIST not in nresult or
2437
              vg_name not in nresult[constants.NV_VGLIST])
2438
      _ErrorIf(test, self.ENODELVM, node,
2439
               "node didn't return data for the volume group '%s'"
2440
               " - it is either missing or broken", vg_name)
2441
      if not test:
2442
        try:
2443
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
2444
        except (ValueError, TypeError):
2445
          _ErrorIf(True, self.ENODERPC, node,
2446
                   "node returned invalid LVM info, check LVM status")
2447

    
2448
  def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
2449
    """Gets per-disk status information for all instances.
2450

2451
    @type nodelist: list of strings
2452
    @param nodelist: Node names
2453
    @type node_image: dict of (name, L{objects.Node})
2454
    @param node_image: Node objects
2455
    @type instanceinfo: dict of (name, L{objects.Instance})
2456
    @param instanceinfo: Instance objects
2457
    @rtype: {instance: {node: [(success, payload)]}}
2458
    @return: a dictionary of per-instance dictionaries with nodes as
2459
        keys and disk information as values; the disk information is a
2460
        list of tuples (success, payload)
2461

2462
    """
2463
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
2464

    
2465
    node_disks = {}
2466
    node_disks_devonly = {}
2467
    diskless_instances = set()
2468
    diskless = constants.DT_DISKLESS
2469

    
2470
    for nname in nodelist:
2471
      node_instances = list(itertools.chain(node_image[nname].pinst,
2472
                                            node_image[nname].sinst))
2473
      diskless_instances.update(inst for inst in node_instances
2474
                                if instanceinfo[inst].disk_template == diskless)
2475
      disks = [(inst, disk)
2476
               for inst in node_instances
2477
               for disk in instanceinfo[inst].disks]
2478

    
2479
      if not disks:
2480
        # No need to collect data
2481
        continue
2482

    
2483
      node_disks[nname] = disks
2484

    
2485
      # Creating copies as SetDiskID below will modify the objects and that can
2486
      # lead to incorrect data returned from nodes
2487
      devonly = [dev.Copy() for (_, dev) in disks]
2488

    
2489
      for dev in devonly:
2490
        self.cfg.SetDiskID(dev, nname)
2491

    
2492
      node_disks_devonly[nname] = devonly
2493

    
2494
    assert len(node_disks) == len(node_disks_devonly)
2495

    
2496
    # Collect data from all nodes with disks
2497
    result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
2498
                                                          node_disks_devonly)
2499

    
2500
    assert len(result) == len(node_disks)
2501

    
2502
    instdisk = {}
2503

    
2504
    for (nname, nres) in result.items():
2505
      disks = node_disks[nname]
2506

    
2507
      if nres.offline:
2508
        # No data from this node
2509
        data = len(disks) * [(False, "node offline")]
2510
      else:
2511
        msg = nres.fail_msg
2512
        _ErrorIf(msg, self.ENODERPC, nname,
2513
                 "while getting disk information: %s", msg)
2514
        if msg:
2515
          # No data from this node
2516
          data = len(disks) * [(False, msg)]
2517
        else:
2518
          data = []
2519
          for idx, i in enumerate(nres.payload):
2520
            if isinstance(i, (tuple, list)) and len(i) == 2:
2521
              data.append(i)
2522
            else:
2523
              logging.warning("Invalid result from node %s, entry %d: %s",
2524
                              nname, idx, i)
2525
              data.append((False, "Invalid result from the remote node"))
2526

    
2527
      for ((inst, _), status) in zip(disks, data):
2528
        instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)
2529

    
2530
    # Add empty entries for diskless instances.
2531
    for inst in diskless_instances:
2532
      assert inst not in instdisk
2533
      instdisk[inst] = {}
2534

    
2535
    assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
2536
                      len(nnames) <= len(instanceinfo[inst].all_nodes) and
2537
                      compat.all(isinstance(s, (tuple, list)) and
2538
                                 len(s) == 2 for s in statuses)
2539
                      for inst, nnames in instdisk.items()
2540
                      for nname, statuses in nnames.items())
2541
    assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
2542

    
2543
    return instdisk
2544

    
2545
  def BuildHooksEnv(self):
2546
    """Build hooks env.
2547

2548
    Cluster-Verify hooks are run only in the post phase; a hook failure is
2549
    logged in the verify output and causes the verification to fail.
2550

2551
    """
2552
    env = {
2553
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
2554
      }
2555

    
2556
    env.update(("NODE_TAGS_%s" % node.name, " ".join(node.GetTags()))
2557
               for node in self.my_node_info.values())
2558

    
2559
    return env
2560

    
2561
  def BuildHooksNodes(self):
2562
    """Build hooks nodes.
2563

2564
    """
2565
    return ([], self.my_node_names)
2566

    
2567
  def Exec(self, feedback_fn):
2568
    """Verify integrity of the node group, performing various test on nodes.
2569

2570
    """
2571
    # This method has too many local variables. pylint: disable=R0914
2572
    feedback_fn("* Verifying group '%s'" % self.group_info.name)
2573

    
2574
    if not self.my_node_names:
2575
      # empty node group
2576
      feedback_fn("* Empty node group, skipping verification")
2577
      return True
2578

    
2579
    self.bad = False
2580
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
2581
    verbose = self.op.verbose
2582
    self._feedback_fn = feedback_fn
2583

    
2584
    vg_name = self.cfg.GetVGName()
2585
    drbd_helper = self.cfg.GetDRBDHelper()
2586
    cluster = self.cfg.GetClusterInfo()
2587
    groupinfo = self.cfg.GetAllNodeGroupsInfo()
2588
    hypervisors = cluster.enabled_hypervisors
2589
    node_data_list = [self.my_node_info[name] for name in self.my_node_names]
2590

    
2591
    i_non_redundant = [] # Non redundant instances
2592
    i_non_a_balanced = [] # Non auto-balanced instances
2593
    n_offline = 0 # Count of offline nodes
2594
    n_drained = 0 # Count of nodes being drained
2595
    node_vol_should = {}
2596

    
2597
    # FIXME: verify OS list
2598

    
2599
    # File verification
2600
    filemap = _ComputeAncillaryFiles(cluster, False)
2601

    
2602
    # do local checksums
2603
    master_node = self.master_node = self.cfg.GetMasterNode()
2604
    master_ip = self.cfg.GetMasterIP()
2605

    
2606
    feedback_fn("* Gathering data (%d nodes)" % len(self.my_node_names))
2607

    
2608
    # We will make nodes contact all nodes in their group, and one node from
2609
    # every other group.
2610
    # TODO: should it be a *random* node, different every time?
2611
    online_nodes = [node.name for node in node_data_list if not node.offline]
2612
    other_group_nodes = {}
2613

    
2614
    for name in sorted(self.all_node_info):
2615
      node = self.all_node_info[name]
2616
      if (node.group not in other_group_nodes
2617
          and node.group != self.group_uuid
2618
          and not node.offline):
2619
        other_group_nodes[node.group] = node.name
2620

    
2621
    node_verify_param = {
2622
      constants.NV_FILELIST:
2623
        utils.UniqueSequence(filename
2624
                             for files in filemap
2625
                             for filename in files),
2626
      constants.NV_NODELIST: online_nodes + other_group_nodes.values(),
2627
      constants.NV_HYPERVISOR: hypervisors,
2628
      constants.NV_HVPARAMS:
2629
        _GetAllHypervisorParameters(cluster, self.all_inst_info.values()),
2630
      constants.NV_NODENETTEST: [(node.name, node.primary_ip, node.secondary_ip)
2631
                                 for node in node_data_list
2632
                                 if not node.offline],
2633
      constants.NV_INSTANCELIST: hypervisors,
2634
      constants.NV_VERSION: None,
2635
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
2636
      constants.NV_NODESETUP: None,
2637
      constants.NV_TIME: None,
2638
      constants.NV_MASTERIP: (master_node, master_ip),
2639
      constants.NV_OSLIST: None,
2640
      constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
2641
      }
2642

    
2643
    if vg_name is not None:
2644
      node_verify_param[constants.NV_VGLIST] = None
2645
      node_verify_param[constants.NV_LVLIST] = vg_name
2646
      node_verify_param[constants.NV_PVLIST] = [vg_name]
2647
      node_verify_param[constants.NV_DRBDLIST] = None
2648

    
2649
    if drbd_helper:
2650
      node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
2651

    
2652
    # bridge checks
2653
    # FIXME: this needs to be changed per node-group, not cluster-wide
2654
    bridges = set()
2655
    default_nicpp = cluster.nicparams[constants.PP_DEFAULT]
2656
    if default_nicpp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
2657
      bridges.add(default_nicpp[constants.NIC_LINK])
2658
    for instance in self.my_inst_info.values():
2659
      for nic in instance.nics:
2660
        full_nic = cluster.SimpleFillNIC(nic.nicparams)
2661
        if full_nic[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
2662
          bridges.add(full_nic[constants.NIC_LINK])
2663

    
2664
    if bridges:
2665
      node_verify_param[constants.NV_BRIDGES] = list(bridges)
2666

    
2667
    # Build our expected cluster state
2668
    node_image = dict((node.name, self.NodeImage(offline=node.offline,
2669
                                                 name=node.name,
2670
                                                 vm_capable=node.vm_capable))
2671
                      for node in node_data_list)
2672

    
2673
    # Gather OOB paths
2674
    oob_paths = []
2675
    for node in self.all_node_info.values():
2676
      path = _SupportsOob(self.cfg, node)
2677
      if path and path not in oob_paths:
2678
        oob_paths.append(path)
2679

    
2680
    if oob_paths:
2681
      node_verify_param[constants.NV_OOB_PATHS] = oob_paths
2682

    
2683
    for instance in self.my_inst_names:
2684
      inst_config = self.my_inst_info[instance]
2685

    
2686
      for nname in inst_config.all_nodes:
2687
        if nname not in node_image:
2688
          gnode = self.NodeImage(name=nname)
2689
          gnode.ghost = (nname not in self.all_node_info)
2690
          node_image[nname] = gnode
2691

    
2692
      inst_config.MapLVsByNode(node_vol_should)
2693

    
2694
      pnode = inst_config.primary_node
2695
      node_image[pnode].pinst.append(instance)
2696

    
2697
      for snode in inst_config.secondary_nodes:
2698
        nimg = node_image[snode]
2699
        nimg.sinst.append(instance)
2700
        if pnode not in nimg.sbp:
2701
          nimg.sbp[pnode] = []
2702
        nimg.sbp[pnode].append(instance)
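    # nimg.sbp thus maps a primary node to the instances for which this node
    # acts as secondary; _VerifyNPlusOneMemory consumes this structure later.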
2703

    
2704
    # At this point, we have the in-memory data structures complete,
2705
    # except for the runtime information, which we'll gather next
2706

    
2707
    # Due to the way our RPC system works, exact response times cannot be
2708
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
2709
    # time before and after executing the request, we can at least have a time
2710
    # window.
2711
    nvinfo_starttime = time.time()
2712
    all_nvinfo = self.rpc.call_node_verify(self.my_node_names,
2713
                                           node_verify_param,
2714
                                           self.cfg.GetClusterName())
2715
    nvinfo_endtime = time.time()
2716

    
2717
    if self.extra_lv_nodes and vg_name is not None:
2718
      extra_lv_nvinfo = \
2719
          self.rpc.call_node_verify(self.extra_lv_nodes,
2720
                                    {constants.NV_LVLIST: vg_name},
2721
                                    self.cfg.GetClusterName())
2722
    else:
2723
      extra_lv_nvinfo = {}
2724

    
2725
    all_drbd_map = self.cfg.ComputeDRBDMap()
2726

    
2727
    feedback_fn("* Gathering disk information (%s nodes)" %
2728
                len(self.my_node_names))
2729
    instdisk = self._CollectDiskInfo(self.my_node_names, node_image,
2730
                                     self.my_inst_info)
2731

    
2732
    feedback_fn("* Verifying configuration file consistency")
2733

    
2734
    # If not all nodes are being checked, we need to make sure the master node
2735
    # and a non-checked vm_capable node are in the list.
2736
    absent_nodes = set(self.all_node_info).difference(self.my_node_info)
2737
    if absent_nodes:
2738
      vf_nvinfo = all_nvinfo.copy()
2739
      vf_node_info = list(self.my_node_info.values())
2740
      additional_nodes = []
2741
      if master_node not in self.my_node_info:
2742
        additional_nodes.append(master_node)
2743
        vf_node_info.append(self.all_node_info[master_node])
2744
      # Add the first vm_capable node we find which is not included
2745
      for node in absent_nodes:
2746
        nodeinfo = self.all_node_info[node]
2747
        if nodeinfo.vm_capable and not nodeinfo.offline:
2748
          additional_nodes.append(node)
2749
          vf_node_info.append(self.all_node_info[node])
2750
          break
2751
      key = constants.NV_FILELIST
2752
      vf_nvinfo.update(self.rpc.call_node_verify(additional_nodes,
2753
                                                 {key: node_verify_param[key]},
2754
                                                 self.cfg.GetClusterName()))
2755
    else:
2756
      vf_nvinfo = all_nvinfo
2757
      vf_node_info = self.my_node_info.values()
2758

    
2759
    self._VerifyFiles(_ErrorIf, vf_node_info, master_node, vf_nvinfo, filemap)
2760

    
2761
    feedback_fn("* Verifying node status")
2762

    
2763
    refos_img = None
2764

    
2765
    for node_i in node_data_list:
2766
      node = node_i.name
2767
      nimg = node_image[node]
2768

    
2769
      if node_i.offline:
2770
        if verbose:
2771
          feedback_fn("* Skipping offline node %s" % (node,))
2772
        n_offline += 1
2773
        continue
2774

    
2775
      if node == master_node:
2776
        ntype = "master"
2777
      elif node_i.master_candidate:
2778
        ntype = "master candidate"
2779
      elif node_i.drained:
2780
        ntype = "drained"
2781
        n_drained += 1
2782
      else:
2783
        ntype = "regular"
2784
      if verbose:
2785
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))
2786

    
2787
      msg = all_nvinfo[node].fail_msg
2788
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
2789
      if msg:
2790
        nimg.rpc_fail = True
2791
        continue
2792

    
2793
      nresult = all_nvinfo[node].payload
2794

    
2795
      nimg.call_ok = self._VerifyNode(node_i, nresult)
2796
      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
2797
      self._VerifyNodeNetwork(node_i, nresult)
2798
      self._VerifyOob(node_i, nresult)
2799

    
2800
      if nimg.vm_capable:
2801
        self._VerifyNodeLVM(node_i, nresult, vg_name)
2802
        self._VerifyNodeDrbd(node_i, nresult, self.all_inst_info, drbd_helper,
2803
                             all_drbd_map)
2804

    
2805
        self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
2806
        self._UpdateNodeInstances(node_i, nresult, nimg)
2807
        self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
2808
        self._UpdateNodeOS(node_i, nresult, nimg)
2809

    
2810
        if not nimg.os_fail:
2811
          if refos_img is None:
2812
            refos_img = nimg
2813
          self._VerifyNodeOS(node_i, nimg, refos_img)
2814
        self._VerifyNodeBridges(node_i, nresult, bridges)
2815

    
2816
        # Check whether all running instances are primary for the node. (This
2817
        # can no longer be done from _VerifyInstance below, since some of the
2818
        # wrong instances could be from other node groups.)
2819
        non_primary_inst = set(nimg.instances).difference(nimg.pinst)
2820

    
2821
        for inst in non_primary_inst:
2822
          test = inst in self.all_inst_info
2823
          _ErrorIf(test, self.EINSTANCEWRONGNODE, inst,
2824
                   "instance should not run on node %s", node_i.name)
2825
          _ErrorIf(not test, self.ENODEORPHANINSTANCE, node_i.name,
2826
                   "node is running unknown instance %s", inst)
2827

    
2828
    for node, result in extra_lv_nvinfo.items():
      self._UpdateNodeVolumes(self.all_node_info[node], result.payload,
                              node_image[node], vg_name)

    feedback_fn("* Verifying instance status")
    for instance in self.my_inst_names:
      if verbose:
        feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.my_inst_info[instance]
      self._VerifyInstance(instance, inst_config, node_image,
                           instdisk[instance])
      inst_nodes_offline = []

      pnode = inst_config.primary_node
      pnode_img = node_image[pnode]
      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
               self.ENODERPC, pnode, "instance %s, connection to"
               " primary node failed", instance)

      _ErrorIf(inst_config.admin_up and pnode_img.offline,
               self.EINSTANCEBADNODE, instance,
               "instance is marked as running and lives on offline node %s",
               inst_config.primary_node)

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if not inst_config.secondary_nodes:
        i_non_redundant.append(instance)

      _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
               instance, "instance has multiple secondary nodes: %s",
               utils.CommaJoin(inst_config.secondary_nodes),
               code=self.ETYPE_WARNING)

      if inst_config.disk_template in constants.DTS_INT_MIRROR:
        pnode = inst_config.primary_node
        instance_nodes = utils.NiceSort(inst_config.all_nodes)
        instance_groups = {}

        for node in instance_nodes:
          instance_groups.setdefault(self.all_node_info[node].group,
                                     []).append(node)

        pretty_list = [
          "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
          # Sort so that we always list the primary node first.
          for group, nodes in sorted(instance_groups.items(),
                                     key=lambda (_, nodes): pnode in nodes,
                                     reverse=True)]

        self._ErrorIf(len(instance_groups) > 1, self.EINSTANCESPLITGROUPS,
                      instance, "instance has primary and secondary nodes in"
                      " different groups: %s", utils.CommaJoin(pretty_list),
                      code=self.ETYPE_WARNING)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        s_img = node_image[snode]
        _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
                 "instance %s, connection to secondary node failed", instance)

        if s_img.offline:
          inst_nodes_offline.append(snode)

      # warn that the instance lives on offline nodes
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
               "instance has offline secondary node(s) %s",
               utils.CommaJoin(inst_nodes_offline))
      # ... or ghost/non-vm_capable nodes
      for node in inst_config.all_nodes:
        _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
                 "instance lives on ghost node %s", node)
        _ErrorIf(not node_image[node].vm_capable, self.EINSTANCEBADNODE,
                 instance, "instance lives on non-vm_capable node %s", node)

    feedback_fn("* Verifying orphan volumes")
2909
    reserved = utils.FieldSet(*cluster.reserved_lvs)
2910

    
2911
    # We will get spurious "unknown volume" warnings if any node of this group
2912
    # is secondary for an instance whose primary is in another group. To avoid
2913
    # them, we find these instances and add their volumes to node_vol_should.
2914
    for inst in self.all_inst_info.values():
2915
      for secondary in inst.secondary_nodes:
2916
        if (secondary in self.my_node_info
2917
            and inst.name not in self.my_inst_info):
2918
          inst.MapLVsByNode(node_vol_should)
2919
          break
2920

    
2921
    self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
2922

    
2923
    if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
2924
      feedback_fn("* Verifying N+1 Memory redundancy")
2925
      self._VerifyNPlusOneMemory(node_image, self.my_inst_info)
2926

    
2927
    feedback_fn("* Other Notes")
2928
    if i_non_redundant:
2929
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
2930
                  % len(i_non_redundant))
2931

    
2932
    if i_non_a_balanced:
2933
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
2934
                  % len(i_non_a_balanced))
2935

    
2936
    if n_offline:
2937
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)
2938

    
2939
    if n_drained:
2940
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)
2941

    
2942
    return not self.bad
2943

    
2944
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, only for non-empty groups,
    # and are only interested in their results
    if not self.my_node_names:
      # empty node group
      pass
    elif phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      feedback_fn("* Hooks Results")
      assert hooks_results, "invalid result from hooks"

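      # Each res.payload entry is a (script, hkr, output) tuple, as unpacked
      # in the loop below, e.g. (values hypothetical):
      #   ("10-example-hook", constants.HKR_FAIL, "some error output")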
      for node_name in hooks_results:
        res = hooks_results[node_name]
        msg = res.fail_msg
        test = msg and not res.offline
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
                      "Communication failure in hooks execution: %s", msg)
        if res.offline or msg:
          # No need to investigate payload if node is offline or gave an error.
          # override manually lu_result here as _ErrorIf only
          # overrides self.bad
          lu_result = 1
          continue
        for script, hkr, output in res.payload:
          test = hkr == constants.HKR_FAIL
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
                        "Script %s failed, output:", script)
          if test:
            output = self._HOOKS_INDENT_RE.sub("      ", output)
            feedback_fn("%s" % output)
            lu_result = 0

    return lu_result


class LUClusterVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = _ShareAll()
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: locking.ALL_SET,
      }

  def Exec(self, feedback_fn):
    group_names = self.owned_locks(locking.LEVEL_NODEGROUP)

    # Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
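    # Illustratively (group names hypothetical), the returned jobs look like
    #   [[OpGroupVerifyDisks(group_name="default")],
    #    [OpGroupVerifyDisks(group_name="other-group")]]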
    return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
                           for group in group_names])


class LUGroupVerifyDisks(NoHooksLU):
  """Verifies the status of all disks in a node group.

  """
  REQ_BGL = False

  def ExpandNames(self):
    # Raises errors.OpPrereqError on its own if group can't be found
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.share_locks = _ShareAll()
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetNodeGroupInstances(self.group_uuid)

    elif level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        set([self.group_uuid] +
            # Lock all groups used by instances optimistically; this requires
            # going via the node before it's locked, requiring verification
            # later on
            [group_uuid
             for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
             for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])

    elif level == locking.LEVEL_NODE:
      # This will only lock the nodes in the group to be verified which contain
      # actual instances
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
      self._LockInstancesNodes()

      # Lock all nodes in group to be verified
      assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
      member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
      self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)

  def CheckPrereq(self):
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert self.group_uuid in owned_groups

    # Check if locked instances are still correct
    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)

    # Get instance information
    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))

    # Check if node groups for locked instances are still correct
    for (instance_name, inst) in self.instances.items():
      assert owned_nodes.issuperset(inst.all_nodes), \
        "Instance %s's nodes changed while we kept the lock" % instance_name

      inst_groups = _CheckInstanceNodeGroups(self.cfg, instance_name,
                                             owned_groups)

      assert self.group_uuid in inst_groups, \
        "Instance %s has no node in group %s" % (instance_name, self.group_uuid)

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes)

    """
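    # Illustrative return value (all names hypothetical):
    #   ({"node2": "Error while calling lv_list ..."},
    #    ["instance1"],
    #    {"instance2": [("node3", "xenvg/disk0_data")]})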
    res_nodes = {}
    res_instances = set()
    res_missing = {}

    nv_dict = _MapInstanceDisksToNodes([inst
                                        for inst in self.instances.values()
                                        if inst.admin_up])
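    # nv_dict maps (node_name, lv_name) pairs to the owning instance; any
    # pair still present after matching the LV listings below is reported
    # as a missing volume.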

    if nv_dict:
      nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
                             set(self.cfg.GetVmCapableNodeList()))

      node_lvs = self.rpc.call_lv_list(nodes, [])

      for (node, node_res) in node_lvs.items():
        if node_res.offline:
          continue

        msg = node_res.fail_msg
        if msg:
          logging.warning("Error enumerating LVs on node %s: %s", node, msg)
          res_nodes[node] = msg
          continue

        for lv_name, (_, _, lv_online) in node_res.payload.items():
          inst = nv_dict.pop((node, lv_name), None)
          if not (lv_online or inst is None):
            res_instances.add(inst)

      # any leftover items in nv_dict are missing LVs, let's arrange the data
      # better
      for key, inst in nv_dict.iteritems():
        res_missing.setdefault(inst, []).append(key)

    return (res_nodes, list(res_instances), res_missing)


class LUClusterRepairDiskSizes(NoHooksLU):
  """Verifies the cluster disks sizes.

  """
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.instances:
      self.wanted_names = _GetWantedInstances(self, self.op.instances)
      self.needed_locks = {
        locking.LEVEL_NODE: [],
        locking.LEVEL_INSTANCE: self.wanted_names,
        }
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    else:
      self.wanted_names = None
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: locking.ALL_SET,
        }
    self.share_locks = _ShareAll()

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
      self._LockInstancesNodes(primary_only=True)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)

    self.wanted_instances = \
        map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))

  def _EnsureChildSizes(self, disk):
    """Ensure children of the disk have the needed disk size.

    This is valid mainly for DRBD8 and fixes an issue where the
    children have smaller disk size.

    @param disk: an L{ganeti.objects.Disk} object

    """
    if disk.dev_type == constants.LD_DRBD8:
      assert disk.children, "Empty children for DRBD8?"
      fchild = disk.children[0]
      mismatch = fchild.size < disk.size
      if mismatch:
        self.LogInfo("Child disk has size %d, parent %d, fixing",
                     fchild.size, disk.size)
        fchild.size = disk.size

      # and we recurse on this child only, not on the metadev
      return self._EnsureChildSizes(fchild) or mismatch
    else:
      return False

  def Exec(self, feedback_fn):
    """Verify the size of cluster disks.

    """
    # TODO: check child disks too
    # TODO: check differences in size between primary/secondary nodes
    per_node_disks = {}
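    # per_node_disks maps each primary node name to a list of
    # (instance, disk_index, disk) tuples, filled in by the loop below.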
    for instance in self.wanted_instances:
      pnode = instance.primary_node
      if pnode not in per_node_disks:
        per_node_disks[pnode] = []
      for idx, disk in enumerate(instance.disks):
        per_node_disks[pnode].append((instance, idx, disk))

    changed = []
    for node, dskl in per_node_disks.items():
      newl = [v[2].Copy() for v in dskl]
      for dsk in newl:
        self.cfg.SetDiskID(dsk, node)
      result = self.rpc.call_blockdev_getsize(node, newl)
      if result.fail_msg:
        self.LogWarning("Failure in blockdev_getsize call to node"
                        " %s, ignoring", node)
        continue
      if len(result.payload) != len(dskl):
        logging.warning("Invalid result from node %s: len(dskl)=%d,"
                        " result.payload=%s", node, len(dskl), result.payload)
        self.LogWarning("Invalid result from node %s, ignoring node results",
                        node)
        continue
      for ((instance, idx, disk), size) in zip(dskl, result.payload):
        if size is None:
          self.LogWarning("Disk %d of instance %s did not return size"
                          " information, ignoring", idx, instance.name)
          continue
        if not isinstance(size, (int, long)):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " size information, ignoring", idx, instance.name)
          continue
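        # The payload size is presumably reported in bytes (hence the 20-bit
        # shift below), while disk.size in the configuration is kept in MiB.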
        size = size >> 20
        if size != disk.size:
          self.LogInfo("Disk %d of instance %s has mismatched size,"
                       " correcting: recorded %d, actual %d", idx,
                       instance.name, disk.size, size)
          disk.size = size
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, size))
        if self._EnsureChildSizes(disk):
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, disk.size))
    return changed


class LUClusterRename(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([self.cfg.GetMasterNode()], self.cfg.GetNodeList())

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = netutils.GetHostname(name=self.op.name,
                                    family=self.cfg.GetPrimaryIPFamily())

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed",
                                 errors.ECODE_INVAL)
    if new_ip != old_ip:
      if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network" %
                                   new_ip, errors.ECODE_NOTUNIQUE)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster, feedback_fn)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetOnlineNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
    finally:
      result = self.rpc.call_node_start_master(master, False, False)
      msg = result.fail_msg
      if msg:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually: %s", msg)

    return clustername


class LUClusterSetParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters

    """
    if self.op.uid_pool:
      uidpool.CheckUidPool(self.op.uid_pool)

    if self.op.add_uids:
      uidpool.CheckUidPool(self.op.add_uids)

    if self.op.remove_uids:
      uidpool.CheckUidPool(self.op.remove_uids)

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    if self.op.vg_name is not None and not self.op.vg_name:
      if self.cfg.HasAnyDiskOfType(constants.LD_LV):
        raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
                                   " instances exist", errors.ECODE_INVAL)

    if self.op.drbd_helper is not None and not self.op.drbd_helper:
      if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
        raise errors.OpPrereqError("Cannot disable drbd helper while"
                                   " drbd-based instances exist",
                                   errors.ECODE_INVAL)

    node_list = self.owned_locks(locking.LEVEL_NODE)

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        msg = vglist[node].fail_msg
        if msg:
          # ignoring down node
          self.LogWarning("Error while gathering data on node %s"
                          " (ignoring node): %s", node, msg)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus), errors.ECODE_ENVIRON)

    if self.op.drbd_helper:
      # checks given drbd helper on all nodes
      helpers = self.rpc.call_drbd_helper(node_list)
      for (node, ninfo) in self.cfg.GetMultiNodeInfo(node_list):
        if ninfo.offline:
          self.LogInfo("Not checking drbd helper on offline node %s", node)
          continue
        msg = helpers[node].fail_msg
        if msg:
          raise errors.OpPrereqError("Error checking drbd helper on node"
                                     " '%s': %s" % (node, msg),
                                     errors.ECODE_ENVIRON)
        node_helper = helpers[node].payload
        if node_helper != self.op.drbd_helper:
          raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
                                     (node, node_helper), errors.ECODE_ENVIRON)

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate params changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      self.new_beparams = cluster.SimpleFillBE(self.op.beparams)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)

      # TODO: we need a more general way to handle resetting
      # cluster-level parameters to default values
      if self.new_ndparams["oob_program"] == "":
        self.new_ndparams["oob_program"] = \
            constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]

    if self.op.nicparams:
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
      self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
      nic_errors = []

      # check all instances for consistency
      for instance in self.cfg.GetAllInstancesInfo().values():
        for nic_idx, nic in enumerate(instance.nics):
          params_copy = copy.deepcopy(nic.nicparams)
          params_filled = objects.FillDict(self.new_nicparams, params_copy)

          # check parameter syntax
          try:
            objects.NIC.CheckParameterSyntax(params_filled)
          except errors.ConfigurationError, err:
            nic_errors.append("Instance %s, nic/%d: %s" %
                              (instance.name, nic_idx, err))

          # if we're moving instances to routed, check that they have an ip
          target_mode = params_filled[constants.NIC_MODE]
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
            nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
                              " address" % (instance.name, nic_idx))
      if nic_errors:
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
                                   "\n".join(nic_errors))

    # hypervisor list/parameters
    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    # os hypervisor parameters
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
    if self.op.os_hvp:
      for os_name, hvs in self.op.os_hvp.items():
        if os_name not in self.new_os_hvp:
          self.new_os_hvp[os_name] = hvs
        else:
          for hv_name, hv_dict in hvs.items():
            if hv_name not in self.new_os_hvp[os_name]:
              self.new_os_hvp[os_name][hv_name] = hv_dict
            else:
              self.new_os_hvp[os_name][hv_name].update(hv_dict)

    # os parameters
    self.new_osp = objects.FillDict(cluster.osparams, {})
    if self.op.osparams:
      for os_name, osp in self.op.osparams.items():
        if os_name not in self.new_osp:
          self.new_osp[os_name] = {}

        self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
                                                  use_none=True)

        if not self.new_osp[os_name]:
          # we removed all parameters
          del self.new_osp[os_name]
        else:
          # check the parameter validity (remote check)
          _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
                         os_name, self.new_osp[os_name])

    # changes to the hypervisor list
    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
      for hv in self.hv_list:
        # if the hypervisor doesn't already exist in the cluster
        # hvparams, we initialize it to empty, and then (in both
        # cases) we make sure to fill the defaults, as we might not
        # have a complete defaults list if the hypervisor wasn't
        # enabled before
        if hv not in new_hvp:
          new_hvp[hv] = {}
        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

    if self.op.os_hvp:
      # no need to check any newly-enabled hypervisors, since the
      # defaults have already been checked in the above code-block
      for os_name, os_hvp in self.new_os_hvp.items():
        for hv_name, hv_params in os_hvp.items():
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          # we need to fill in the new os_hvp on top of the actual hv_p
          cluster_defaults = self.new_hvparams.get(hv_name, {})
          new_osp = objects.FillDict(cluster_defaults, hv_params)
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(new_osp)
          _CheckHVParams(self, node_list, hv_name, new_osp)

    if self.op.default_iallocator:
      alloc_script = utils.FindFile(self.op.default_iallocator,
                                    constants.IALLOCATOR_SEARCH_PATH,
                                    os.path.isfile)
      if alloc_script is None:
        raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                   " specified" % self.op.default_iallocator,
                                   errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      new_volume = self.op.vg_name
      if not new_volume:
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.drbd_helper is not None:
      new_helper = self.op.drbd_helper
      if not new_helper:
        new_helper = None
      if new_helper != self.cfg.GetDRBDHelper():
        self.cfg.SetDRBDHelper(new_helper)
      else:
        feedback_fn("Cluster DRBD helper already in desired state,"
                    " not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.os_hvp:
      self.cluster.os_hvp = self.new_os_hvp
    if self.op.enabled_hypervisors is not None:
      self.cluster.hvparams = self.new_hvparams
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
    if self.op.nicparams:
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
    if self.op.osparams:
      self.cluster.osparams = self.new_osp
    if self.op.ndparams:
      self.cluster.ndparams = self.new_ndparams

    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
      # we need to update the pool size here, otherwise the save will fail
      _AdjustCandidatePool(self, [])

    if self.op.maintain_node_health is not None:
      self.cluster.maintain_node_health = self.op.maintain_node_health

    if self.op.prealloc_wipe_disks is not None:
      self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks

    if self.op.add_uids is not None:
      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)

    if self.op.remove_uids is not None:
      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)

    if self.op.uid_pool is not None:
      self.cluster.uid_pool = self.op.uid_pool

    if self.op.default_iallocator is not None:
      self.cluster.default_iallocator = self.op.default_iallocator

    if self.op.reserved_lvs is not None:
      self.cluster.reserved_lvs = self.op.reserved_lvs

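    # "mods" is a list of (action, os_name) pairs, e.g. (names hypothetical)
    # [(constants.DDM_ADD, "debootstrap"), (constants.DDM_REMOVE, "old-os")].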
    def helper_os(aname, mods, desc):
      desc += " OS list"
      lst = getattr(self.cluster, aname)
      for key, val in mods:
        if key == constants.DDM_ADD:
          if val in lst:
            feedback_fn("OS %s already in %s, ignoring" % (val, desc))
          else:
            lst.append(val)
        elif key == constants.DDM_REMOVE:
          if val in lst:
            lst.remove(val)
          else:
            feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
        else:
          raise errors.ProgrammerError("Invalid modification '%s'" % key)

    if self.op.hidden_os:
      helper_os("hidden_os", self.op.hidden_os, "hidden")

    if self.op.blacklisted_os:
      helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")

    if self.op.master_netdev:
      master = self.cfg.GetMasterNode()
      feedback_fn("Shutting down master ip on the current netdev (%s)" %
                  self.cluster.master_netdev)
      result = self.rpc.call_node_stop_master(master, False)
      result.Raise("Could not disable the master ip")
      feedback_fn("Changing master_netdev from %s to %s" %
                  (self.cluster.master_netdev, self.op.master_netdev))
      self.cluster.master_netdev = self.op.master_netdev

    self.cfg.Update(self.cluster, feedback_fn)

    if self.op.master_netdev:
      feedback_fn("Starting the master ip on the new master netdev (%s)" %
                  self.op.master_netdev)
      result = self.rpc.call_node_start_master(master, False, False)
      if result.fail_msg:
        self.LogWarning("Could not re-enable the master ip on"
                        " the master, please restart manually: %s",
                        result.fail_msg)


def _UploadHelper(lu, nodes, fname):
  """Helper for uploading a file and showing warnings.

  """
  if os.path.exists(fname):
    result = lu.rpc.call_upload_file(nodes, fname)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (fname, to_node, msg))
        lu.proc.LogWarning(msg)


def _ComputeAncillaryFiles(cluster, redist):
  """Compute files external to Ganeti which need to be consistent.

  @type redist: boolean
  @param redist: Whether to include files which need to be redistributed

  """
  # Compute files for all nodes
  files_all = set([
    constants.SSH_KNOWN_HOSTS_FILE,
    constants.CONFD_HMAC_KEY,
    constants.CLUSTER_DOMAIN_SECRET_FILE,
    ])

  if not redist:
    files_all.update(constants.ALL_CERT_FILES)
    files_all.update(ssconf.SimpleStore().GetFileList())

  if cluster.modify_etc_hosts:
    files_all.add(constants.ETC_HOSTS)

  # Files which must either exist on all nodes or on none
  files_all_opt = set([
    constants.RAPI_USERS_FILE,
    ])

  # Files which should only be on master candidates
  files_mc = set()
  if not redist:
    files_mc.add(constants.CLUSTER_CONF_FILE)

  # Files which should only be on VM-capable nodes
  files_vm = set(filename
    for hv_name in cluster.enabled_hypervisors
    for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles())

  # Filenames must be unique
  assert (len(files_all | files_all_opt | files_mc | files_vm) ==
          sum(map(len, [files_all, files_all_opt, files_mc, files_vm]))), \
         "Found file listed in more than one file list"

  return (files_all, files_all_opt, files_mc, files_vm)


def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  @param lu: calling logical unit
  @param additional_nodes: list of nodes not in the config to distribute to
  @type additional_vm: boolean
  @param additional_vm: whether the additional nodes are vm-capable or not

  """
  # Gather target nodes
  cluster = lu.cfg.GetClusterInfo()
  master_info = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())

  online_nodes = lu.cfg.GetOnlineNodeList()
  vm_nodes = lu.cfg.GetVmCapableNodeList()

  if additional_nodes is not None:
    online_nodes.extend(additional_nodes)
    if additional_vm:
      vm_nodes.extend(additional_nodes)

  # Never distribute to master node
  for nodelist in [online_nodes, vm_nodes]:
    if master_info.name in nodelist:
      nodelist.remove(master_info.name)

  # Gather file lists
  (files_all, files_all_opt, files_mc, files_vm) = \
    _ComputeAncillaryFiles(cluster, True)

  # Never re-distribute configuration file from here
  assert not (constants.CLUSTER_CONF_FILE in files_all or
              constants.CLUSTER_CONF_FILE in files_vm)
  assert not files_mc, "Master candidates not handled in this function"

  filemap = [
    (online_nodes, files_all),
    (online_nodes, files_all_opt),
    (vm_nodes, files_vm),
    ]

  # Upload the files
  for (node_list, files) in filemap:
    for fname in files:
      _UploadHelper(lu, node_list, fname)


class LUClusterRedistConf(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
    _RedistributeAncillaryFiles(self)


def _WaitForSync(lu, instance, disks=None, oneshot=False):
  """Sleep and poll for an instance's disk to sync.

  """
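  # Rough polling behaviour of the loop below: query the mirror status on
  # the primary node, print progress, and sleep min(60, estimated_time)
  # seconds between polls; up to 10 consecutive RPC failures are tolerated
  # (sleeping 6 seconds after each), and up to 10 extra one-second polls are
  # done while the mirror reports degraded with no sync in progress.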
  if not instance.disks or disks is not None and not disks:
    return True

  disks = _ExpandCheckDisks(instance, disks)

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in disks:
    lu.cfg.SetDiskID(dev, node)

  # TODO: Convert to utils.Retry

  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.payload
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                           node, disks[i].iv_name)
        continue

      cumul_degraded = (cumul_degraded or
                        (mstat.is_degraded and mstat.sync_percent is None))
      if mstat.sync_percent is not None:
        done = False
        if mstat.estimated_time is not None:
          rem_time = ("%s remaining (estimated)" %
                      utils.FormatSeconds(mstat.estimated_time))
          max_time = mstat.estimated_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (disks[i].iv_name, mstat.sync_percent, rem_time))

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)

  result = True

  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      if ldisk:
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
      else:
        result = result and not rstats.payload.is_degraded

  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


class LUOobCommand(NoHooksLU):
  """Logical unit for OOB handling.

  """
  REQ_BGL = False
  _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)

  def ExpandNames(self):
    """Gather locks we need.

    """
    if self.op.node_names:
      self.op.node_names = _GetWantedNodes(self, self.op.node_names)
      lock_names = self.op.node_names
    else:
      lock_names = locking.ALL_SET

    self.needed_locks = {
      locking.LEVEL_NODE: lock_names,
      }

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - OOB is supported

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.nodes = []
    self.master_node = self.cfg.GetMasterNode()

    assert self.op.power_delay >= 0.0

    if self.op.node_names:
      if (self.op.command in self._SKIP_MASTER and
          self.master_node in self.op.node_names):
        master_node_obj = self.cfg.GetNodeInfo(self.master_node)
        master_oob_handler = _SupportsOob(self.cfg, master_node_obj)

        if master_oob_handler:
          additional_text = ("run '%s %s %s' if you want to operate on the"
                             " master regardless") % (master_oob_handler,
                                                      self.op.command,
                                                      self.master_node)
        else:
          additional_text = "it does not support out-of-band operations"

        raise errors.OpPrereqError(("Operating on the master node %s is not"
                                    " allowed for %s; %s") %
                                   (self.master_node, self.op.command,
                                    additional_text), errors.ECODE_INVAL)
    else:
      self.op.node_names = self.cfg.GetNodeList()
      if self.op.command in self._SKIP_MASTER:
        self.op.node_names.remove(self.master_node)

    if self.op.command in self._SKIP_MASTER:
      assert self.master_node not in self.op.node_names

    for (node_name, node) in self.cfg.GetMultiNodeInfo(self.op.node_names):
      if node is None:
        raise errors.OpPrereqError("Node %s not found" % node_name,
                                   errors.ECODE_NOENT)
      else:
        self.nodes.append(node)

      if (not self.op.ignore_status and
          (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
        raise errors.OpPrereqError(("Cannot power off node %s because it is"
                                    " not marked offline") % node_name,
                                   errors.ECODE_STATE)

  def Exec(self, feedback_fn):
    """Execute OOB and return result if we expect any.

    """
    master_node = self.master_node
    ret = []
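    # Each entry appended to "ret" is a per-node list of (status, data)
    # tuples starting with (RS_NORMAL, node_name); e.g. a node without OOB
    # support ends up as (node name hypothetical)
    #   [(constants.RS_NORMAL, "node1"), (constants.RS_UNAVAIL, None)]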

    for idx, node in enumerate(utils.NiceSort(self.nodes,
                                              key=lambda node: node.name)):
      node_entry = [(constants.RS_NORMAL, node.name)]
      ret.append(node_entry)

      oob_program = _SupportsOob(self.cfg, node)

      if not oob_program:
        node_entry.append((constants.RS_UNAVAIL, None))
        continue

      logging.info("Executing out-of-band command '%s' using '%s' on %s",
                   self.op.command, oob_program, node.name)
      result = self.rpc.call_run_oob(master_node, oob_program,
                                     self.op.command, node.name,
                                     self.op.timeout)

      if result.fail_msg:
        self.LogWarning("Out-of-band RPC failed on node '%s': %s",
                        node.name, result.fail_msg)
        node_entry.append((constants.RS_NODATA, None))
      else:
        try:
          self._CheckPayload(result)
        except errors.OpExecError, err:
          self.LogWarning("Payload returned by node '%s' is not valid: %s",
                          node.name, err)
          node_entry.append((constants.RS_NODATA, None))
        else:
          if self.op.command == constants.OOB_HEALTH:
            # For health we should log important events
            for item, status in result.payload:
              if status in [constants.OOB_STATUS_WARNING,
                            constants.OOB_STATUS_CRITICAL]:
                self.LogWarning("Item '%s' on node '%s' has status '%s'",
                                item, node.name, status)

          if self.op.command == constants.OOB_POWER_ON:
            node.powered = True
          elif self.op.command == constants.OOB_POWER_OFF:
            node.powered = False
          elif self.op.command == constants.OOB_POWER_STATUS:
            powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
            if powered != node.powered:
              logging.warning(("Recorded power state (%s) of node '%s' does not"
                               " match actual power state (%s)"), node.powered,
                              node.name, powered)

          # For configuration changing commands we should update the node
          if self.op.command in (constants.OOB_POWER_ON,
                                 constants.OOB_POWER_OFF):
            self.cfg.Update(node, feedback_fn)

          node_entry.append((constants.RS_NORMAL, result.payload))

          if (self.op.command == constants.OOB_POWER_ON and
              idx < len(self.nodes) - 1):
            time.sleep(self.op.power_delay)

    return ret

  def _CheckPayload(self, result):
    """Checks if the payload is valid.

    @param result: RPC result
    @raises errors.OpExecError: If payload is not valid

    """
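    # Illustrative "health" payload (item names hypothetical):
    #   [("PSU1", constants.OOB_STATUS_WARNING),
    #    ("fan0", constants.OOB_STATUS_CRITICAL)]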
    errs = []
    if self.op.command == constants.OOB_HEALTH:
      if not isinstance(result.payload, list):
        errs.append("command 'health' is expected to return a list but got %s" %
                    type(result.payload))
      else:
        for item, status in result.payload:
          if status not in constants.OOB_STATUSES:
            errs.append("health item '%s' has invalid status '%s'" %
                        (item, status))

    if self.op.command == constants.OOB_POWER_STATUS:
      if not isinstance(result.payload, dict):
        errs.append("power-status is expected to return a dict but got %s" %
                    type(result.payload))

    if self.op.command in [
        constants.OOB_POWER_ON,
        constants.OOB_POWER_OFF,
        constants.OOB_POWER_CYCLE,
        ]:
      if result.payload is not None:
        errs.append("%s is expected to not return payload but got '%s'" %
                    (self.op.command, result.payload))

    if errs:
      raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
                               utils.CommaJoin(errs))


class _OsQuery(_QueryBase):
  FIELDS = query.OS_FIELDS

  def ExpandNames(self, lu):
    # Lock all nodes in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    lu.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

    # The following variables interact with _QueryBase._GetNames
    if self.names:
      self.wanted = self.names
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = self.use_locking

  def DeclareLocks(self, lu, level):
    pass

  @staticmethod
  def _DiagnoseByOS(rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another
        map, with nodes as keys and tuples of (path, status, diagnose,
        variants, parameters, api_versions) as values, eg::

          {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
                                     (/srv/..., False, "invalid api")],
                           "node2": [(/srv/..., True, "", [], [])]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].fail_msg]
    for node_name, nr in rlist.items():
      if nr.fail_msg or not nr.payload:
        continue
      for (name, path, status, diagnose, variants,
           params, api_versions) in nr.payload:
        if name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[name] = {}
          for nname in good_nodes:
            all_os[name][nname] = []
        # convert params from [name, help] to (name, help)
        params = [tuple(v) for v in params]
        all_os[name][node_name].append((path, status, diagnose,
                                        variants, params, api_versions))
    return all_os

  def _GetQueryData(self, lu):
4152
    """Computes the list of nodes and their attributes.
4153

4154
    """
4155
    # Locking is not used
4156
    assert not (compat.any(lu.glm.is_owned(level)
4157
                           for level in locking.LEVELS
4158
                           if level != locking.LEVEL_CLUSTER) or
4159
                self.do_locking or self.use_locking)
4160

    
4161
    valid_nodes = [node.name
4162
                   for node in lu.cfg.GetAllNodesInfo().values()
4163
                   if not node.offline and node.vm_capable]
4164
    pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_nodes))
4165
    cluster = lu.cfg.GetClusterInfo()
4166

    
4167
    data = {}
4168

    
4169
    for (os_name, os_data) in pol.items():
4170
      info = query.OsInfo(name=os_name, valid=True, node_status=os_data,
4171
                          hidden=(os_name in cluster.hidden_os),
4172
                          blacklisted=(os_name in cluster.blacklisted_os))
4173

    
4174
      variants = set()
4175
      parameters = set()
4176
      api_versions = set()
4177

    
4178
      for idx, osl in enumerate(os_data.values()):
4179
        info.valid = bool(info.valid and osl and osl[0][1])
4180
        if not info.valid:
4181
          break
4182

    
4183
        (node_variants, node_params, node_api) = osl[0][3:6]
4184
        if idx == 0:
4185
          # First entry
4186
          variants.update(node_variants)
4187
          parameters.update(node_params)
4188
          api_versions.update(node_api)
4189
        else:
4190
          # Filter out inconsistent values
4191
          variants.intersection_update(node_variants)
4192
          parameters.intersection_update(node_params)
4193
          api_versions.intersection_update(node_api)
4194

    
4195
      info.variants = list(variants)
4196
      info.parameters = list(parameters)
4197
      info.api_versions = list(api_versions)
4198

    
4199
      data[os_name] = info
4200

    
4201
    # Prepare data in requested order
4202
    return [data[name] for name in self._GetNames(lu, pol.keys(), None)
4203
            if name in data]
4204

    
4205

    
4206
class LUOsDiagnose(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  REQ_BGL = False

  @staticmethod
  def _BuildFilter(fields, names):
    """Builds a filter for querying OSes.

    """
    name_filter = qlang.MakeSimpleFilter("name", names)

    # Legacy behaviour: Hide hidden, blacklisted or invalid OSes if the
    # respective field is not requested
    status_filter = [[qlang.OP_NOT, [qlang.OP_TRUE, fname]]
                     for fname in ["hidden", "blacklisted"]
                     if fname not in fields]
    if "valid" not in fields:
      status_filter.append([qlang.OP_TRUE, "valid"])

    if status_filter:
      status_filter.insert(0, qlang.OP_AND)
    else:
      status_filter = None

    if name_filter and status_filter:
      return [qlang.OP_AND, name_filter, status_filter]
    elif name_filter:
      return name_filter
    else:
      return status_filter

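  # Illustrative note (added, not part of the original code): when both a
  # name restriction and the legacy status filter apply, the expression
  # built above has roughly this shape:
  #   [OP_AND,
  #    <filter produced by qlang.MakeSimpleFilter("name", names)>,
  #    [OP_AND, [OP_NOT, [OP_TRUE, "hidden"]],
  #             [OP_NOT, [OP_TRUE, "blacklisted"]],
  #             [OP_TRUE, "valid"]]]
  # so hidden, blacklisted and invalid OSes are skipped unless the caller
  # explicitly asked for the corresponding field.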
  def CheckArguments(self):
    self.oq = _OsQuery(self._BuildFilter(self.op.output_fields, self.op.names),
                       self.op.output_fields, False)

  def ExpandNames(self):
    self.oq.ExpandNames(self)

  def Exec(self, feedback_fn):
    return self.oq.OldStyleQuery(self)


class LUNodeRemove(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    return {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    all_nodes = self.cfg.GetNodeList()
    try:
      all_nodes.remove(self.op.node_name)
    except ValueError:
      logging.warning("Node '%s', which is about to be removed, was not found"
                      " in the list of all nodes", self.op.node_name)
    return (all_nodes, all_nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    node = self.cfg.GetNodeInfo(self.op.node_name)
    assert node is not None

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node, failover to another"
                                 " node is required", errors.ECODE_INVAL)

    for instance_name, instance in self.cfg.GetAllInstancesInfo().items():
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first" % instance_name,
                                   errors.ECODE_INVAL)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self, exceptions=[node.name])
    self.context.RemoveNode(node.name)

    # Run post hooks on the node before it's removed
    _RunPostHook(self, node.name)

    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
    msg = result.fail_msg
    if msg:
      self.LogWarning("Errors encountered on the remote node while leaving"
                      " the cluster: %s", msg)

    # Remove node from our /etc/hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      master_node = self.cfg.GetMasterNode()
      result = self.rpc.call_etc_hosts_modify(master_node,
                                              constants.ETC_HOSTS_REMOVE,
                                              node.name, None)
      result.Raise("Can't update hosts file with new host data")
      _RedistributeAncillaryFiles(self)


class _NodeQuery(_QueryBase):
4343
  FIELDS = query.NODE_FIELDS
4344

    
4345
  def ExpandNames(self, lu):
4346
    lu.needed_locks = {}
4347
    lu.share_locks = _ShareAll()
4348

    
4349
    if self.names:
4350
      self.wanted = _GetWantedNodes(lu, self.names)
4351
    else:
4352
      self.wanted = locking.ALL_SET
4353

    
4354
    self.do_locking = (self.use_locking and
4355
                       query.NQ_LIVE in self.requested_data)
4356

    
4357
    if self.do_locking:
4358
      # If any non-static field is requested we need to lock the nodes
4359
      lu.needed_locks[locking.LEVEL_NODE] = self.wanted
4360

    
4361
  def DeclareLocks(self, lu, level):
4362
    pass
4363

    
4364
  def _GetQueryData(self, lu):
4365
    """Computes the list of nodes and their attributes.
4366

4367
    """
4368
    all_info = lu.cfg.GetAllNodesInfo()
4369

    
4370
    nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
4371

    
4372
    # Gather data as requested
4373
    if query.NQ_LIVE in self.requested_data:
4374
      # filter out non-vm_capable nodes
4375
      toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
4376

    
4377
      node_data = lu.rpc.call_node_info(toquery_nodes, lu.cfg.GetVGName(),
4378
                                        lu.cfg.GetHypervisorType())
4379
      live_data = dict((name, nresult.payload)
4380
                       for (name, nresult) in node_data.items()
4381
                       if not nresult.fail_msg and nresult.payload)
4382
    else:
4383
      live_data = None
4384

    
4385
    if query.NQ_INST in self.requested_data:
4386
      node_to_primary = dict([(name, set()) for name in nodenames])
4387
      node_to_secondary = dict([(name, set()) for name in nodenames])
4388

    
4389
      inst_data = lu.cfg.GetAllInstancesInfo()
4390

    
4391
      for inst in inst_data.values():
4392
        if inst.primary_node in node_to_primary:
4393
          node_to_primary[inst.primary_node].add(inst.name)
4394
        for secnode in inst.secondary_nodes:
4395
          if secnode in node_to_secondary:
4396
            node_to_secondary[secnode].add(inst.name)
4397
    else:
4398
      node_to_primary = None
4399
      node_to_secondary = None
4400

    
4401
    if query.NQ_OOB in self.requested_data:
4402
      oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
4403
                         for name, node in all_info.iteritems())
4404
    else:
4405
      oob_support = None
4406

    
4407
    if query.NQ_GROUP in self.requested_data:
4408
      groups = lu.cfg.GetAllNodeGroupsInfo()
4409
    else:
4410
      groups = {}
4411

    
4412
    return query.NodeQueryData([all_info[name] for name in nodenames],
4413
                               live_data, lu.cfg.GetMasterNode(),
4414
                               node_to_primary, node_to_secondary, groups,
4415
                               oob_support, lu.cfg.GetClusterInfo())
4416

    
4417

    
4418
class LUNodeQuery(NoHooksLU):
4419
  """Logical unit for querying nodes.
4420

4421
  """
4422
  # pylint: disable=W0142
4423
  REQ_BGL = False
4424

    
4425
  def CheckArguments(self):
4426
    self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
4427
                         self.op.output_fields, self.op.use_locking)
4428

    
4429
  def ExpandNames(self):
4430
    self.nq.ExpandNames(self)
4431

    
4432
  def Exec(self, feedback_fn):
4433
    return self.nq.OldStyleQuery(self)
4434

    
4435

    
4436
class LUNodeQueryvols(NoHooksLU):
4437
  """Logical unit for getting volumes on node(s).
4438

4439
  """
4440
  REQ_BGL = False
4441
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
4442
  _FIELDS_STATIC = utils.FieldSet("node")
4443

    
4444
  def CheckArguments(self):
4445
    _CheckOutputFields(static=self._FIELDS_STATIC,
4446
                       dynamic=self._FIELDS_DYNAMIC,
4447
                       selected=self.op.output_fields)
4448

    
4449
  def ExpandNames(self):
4450
    self.needed_locks = {}
4451
    self.share_locks[locking.LEVEL_NODE] = 1
4452
    if not self.op.nodes:
4453
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4454
    else:
4455
      self.needed_locks[locking.LEVEL_NODE] = \
4456
        _GetWantedNodes(self, self.op.nodes)
4457

    
4458
  def Exec(self, feedback_fn):
4459
    """Computes the list of nodes and their attributes.
4460

4461
    """
4462
    nodenames = self.owned_locks(locking.LEVEL_NODE)
4463
    volumes = self.rpc.call_node_volumes(nodenames)
4464

    
4465
    ilist = self.cfg.GetAllInstancesInfo()
    vol2inst = _MapInstanceDisksToNodes(ilist.values())

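    # Note (added for clarity): _MapInstanceDisksToNodes is assumed to return
    # a dict keyed by (node_name, "vg/lv_name") tuples and mapping to the
    # owning instance name, which is why the "instance" field lookup below
    # joins vol["vg"] and vol["name"] with a slash.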
    output = []
4469
    for node in nodenames:
4470
      nresult = volumes[node]
4471
      if nresult.offline:
4472
        continue
4473
      msg = nresult.fail_msg
4474
      if msg:
4475
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
4476
        continue
4477

    
4478
      node_vols = sorted(nresult.payload,
4479
                         key=operator.itemgetter("dev"))
4480

    
4481
      for vol in node_vols:
4482
        node_output = []
4483
        for field in self.op.output_fields:
4484
          if field == "node":
4485
            val = node
4486
          elif field == "phys":
4487
            val = vol["dev"]
4488
          elif field == "vg":
4489
            val = vol["vg"]
4490
          elif field == "name":
4491
            val = vol["name"]
4492
          elif field == "size":
4493
            val = int(float(vol["size"]))
4494
          elif field == "instance":
4495
            val = vol2inst.get((node, vol["vg"] + "/" + vol["name"]), "-")
4496
          else:
4497
            raise errors.ParameterError(field)
4498
          node_output.append(str(val))
4499

    
4500
        output.append(node_output)
4501

    
4502
    return output
4503

    
4504

    
4505
class LUNodeQueryStorage(NoHooksLU):
4506
  """Logical unit for getting information on storage units on node(s).
4507

4508
  """
4509
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
4510
  REQ_BGL = False
4511

    
4512
  def CheckArguments(self):
4513
    _CheckOutputFields(static=self._FIELDS_STATIC,
4514
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
4515
                       selected=self.op.output_fields)
4516

    
4517
  def ExpandNames(self):
4518
    self.needed_locks = {}
4519
    self.share_locks[locking.LEVEL_NODE] = 1
4520

    
4521
    if self.op.nodes:
4522
      self.needed_locks[locking.LEVEL_NODE] = \
4523
        _GetWantedNodes(self, self.op.nodes)
4524
    else:
4525
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4526

    
4527
  def Exec(self, feedback_fn):
4528
    """Computes the list of nodes and their attributes.
4529

4530
    """
4531
    self.nodes = self.owned_locks(locking.LEVEL_NODE)
4532

    
4533
    # Always get name to sort by
4534
    if constants.SF_NAME in self.op.output_fields:
4535
      fields = self.op.output_fields[:]
4536
    else:
4537
      fields = [constants.SF_NAME] + self.op.output_fields
4538

    
4539
    # Never ask for node or type as it's only known to the LU
4540
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
4541
      while extra in fields:
4542
        fields.remove(extra)
4543

    
4544
    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
4545
    name_idx = field_idx[constants.SF_NAME]
4546

    
4547
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
4548
    data = self.rpc.call_storage_list(self.nodes,
4549
                                      self.op.storage_type, st_args,
4550
                                      self.op.name, fields)
4551

    
4552
    result = []
4553

    
4554
    for node in utils.NiceSort(self.nodes):
4555
      nresult = data[node]
4556
      if nresult.offline:
4557
        continue
4558

    
4559
      msg = nresult.fail_msg
4560
      if msg:
4561
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
4562
        continue
4563

    
4564
      rows = dict([(row[name_idx], row) for row in nresult.payload])
4565

    
4566
      for name in utils.NiceSort(rows.keys()):
4567
        row = rows[name]
4568

    
4569
        out = []
4570

    
4571
        for field in self.op.output_fields:
4572
          if field == constants.SF_NODE:
4573
            val = node
4574
          elif field == constants.SF_TYPE:
4575
            val = self.op.storage_type
4576
          elif field in field_idx:
4577
            val = row[field_idx[field]]
4578
          else:
4579
            raise errors.ParameterError(field)
4580

    
4581
          out.append(val)
4582

    
4583
        result.append(out)
4584

    
4585
    return result
4586

    
4587

    
4588
class _InstanceQuery(_QueryBase):
4589
  FIELDS = query.INSTANCE_FIELDS
4590

    
4591
  def ExpandNames(self, lu):
4592
    lu.needed_locks = {}
4593
    lu.share_locks = _ShareAll()
4594

    
4595
    if self.names:
4596
      self.wanted = _GetWantedInstances(lu, self.names)
4597
    else:
4598
      self.wanted = locking.ALL_SET
4599

    
4600
    self.do_locking = (self.use_locking and
4601
                       query.IQ_LIVE in self.requested_data)
4602
    if self.do_locking:
4603
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
4604
      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
4605
      lu.needed_locks[locking.LEVEL_NODE] = []
4606
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4607

    
4608
    self.do_grouplocks = (self.do_locking and
4609
                          query.IQ_NODES in self.requested_data)
4610

    
4611
  def DeclareLocks(self, lu, level):
4612
    if self.do_locking:
4613
      if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
4614
        assert not lu.needed_locks[locking.LEVEL_NODEGROUP]
4615

    
4616
        # Lock all groups used by instances optimistically; this requires going
4617
        # via the node before it's locked, requiring verification later on
4618
        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
4619
          set(group_uuid
4620
              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
4621
              for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
4622
      elif level == locking.LEVEL_NODE:
4623
        lu._LockInstancesNodes() # pylint: disable=W0212
4624

    
4625
  @staticmethod
4626
  def _CheckGroupLocks(lu):
4627
    owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
4628
    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
4629

    
4630
    # Check if node groups for locked instances are still correct
4631
    for instance_name in owned_instances:
4632
      _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
4633

    
4634
  def _GetQueryData(self, lu):
4635
    """Computes the list of instances and their attributes.
4636

4637
    """
4638
    if self.do_grouplocks:
4639
      self._CheckGroupLocks(lu)
4640

    
4641
    cluster = lu.cfg.GetClusterInfo()
4642
    all_info = lu.cfg.GetAllInstancesInfo()
4643

    
4644
    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
4645

    
4646
    instance_list = [all_info[name] for name in instance_names]
4647
    nodes = frozenset(itertools.chain(*(inst.all_nodes
4648
                                        for inst in instance_list)))
4649
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
4650
    bad_nodes = []
4651
    offline_nodes = []
4652
    wrongnode_inst = set()
4653

    
4654
    # Gather data as requested
4655
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
4656
      live_data = {}
4657
      node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
4658
      for name in nodes:
4659
        result = node_data[name]
4660
        if result.offline:
4661
          # offline nodes will be in both lists
4662
          assert result.fail_msg
4663
          offline_nodes.append(name)
4664
        if result.fail_msg:
4665
          bad_nodes.append(name)
4666
        elif result.payload:
4667
          for inst in result.payload:
4668
            if inst in all_info:
4669
              if all_info[inst].primary_node == name:
4670
                live_data.update(result.payload)
4671
              else:
4672
                wrongnode_inst.add(inst)
4673
            else:
4674
              # orphan instance; we don't list it here as we don't
4675
              # handle this case yet in the output of instance listing
4676
              logging.warning("Orphan instance '%s' found on node %s",
4677
                              inst, name)
4678
        # else no instance is alive
4679
    else:
4680
      live_data = {}
4681

    
4682
    if query.IQ_DISKUSAGE in self.requested_data:
4683
      disk_usage = dict((inst.name,
4684
                         _ComputeDiskSize(inst.disk_template,
4685
                                          [{constants.IDISK_SIZE: disk.size}
4686
                                           for disk in inst.disks]))
4687
                        for inst in instance_list)
4688
    else:
4689
      disk_usage = None
4690

    
4691
    if query.IQ_CONSOLE in self.requested_data:
4692
      consinfo = {}
4693
      for inst in instance_list:
4694
        if inst.name in live_data:
4695
          # Instance is running
4696
          consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
4697
        else:
4698
          consinfo[inst.name] = None
4699
      assert set(consinfo.keys()) == set(instance_names)
4700
    else:
4701
      consinfo = None
4702

    
4703
    if query.IQ_NODES in self.requested_data:
4704
      node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
4705
                                            instance_list)))
4706
      nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
4707
      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
4708
                    for uuid in set(map(operator.attrgetter("group"),
4709
                                        nodes.values())))
4710
    else:
4711
      nodes = None
4712
      groups = None
4713

    
4714
    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
4715
                                   disk_usage, offline_nodes, bad_nodes,
4716
                                   live_data, wrongnode_inst, consinfo,
4717
                                   nodes, groups)
4718

    
4719

    
4720
class LUQuery(NoHooksLU):
4721
  """Query for resources/items of a certain kind.
4722

4723
  """
4724
  # pylint: disable=W0142
4725
  REQ_BGL = False
4726

    
4727
  def CheckArguments(self):
4728
    qcls = _GetQueryImplementation(self.op.what)
4729

    
4730
    self.impl = qcls(self.op.filter, self.op.fields, self.op.use_locking)
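    # Note (added): qcls is one of the _*Query helper classes defined in this
    # module (e.g. _NodeQuery, _InstanceQuery, _OsQuery), selected via
    # _GetQueryImplementation from self.op.what; Exec then presumably returns
    # the full fields-plus-rows response through NewStyleQuery, whereas the
    # legacy per-resource LUs above use OldStyleQuery for plain value lists.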
4731

    
4732
  def ExpandNames(self):
4733
    self.impl.ExpandNames(self)
4734

    
4735
  def DeclareLocks(self, level):
4736
    self.impl.DeclareLocks(self, level)
4737

    
4738
  def Exec(self, feedback_fn):
4739
    return self.impl.NewStyleQuery(self)
4740

    
4741

    
4742
class LUQueryFields(NoHooksLU):
4743
  """Query for resources/items of a certain kind.
4744

4745
  """
4746
  # pylint: disable=W0142
4747
  REQ_BGL = False
4748

    
4749
  def CheckArguments(self):
4750
    self.qcls = _GetQueryImplementation(self.op.what)
4751

    
4752
  def ExpandNames(self):
4753
    self.needed_locks = {}
4754

    
4755
  def Exec(self, feedback_fn):
4756
    return query.QueryFields(self.qcls.FIELDS, self.op.fields)
4757

    
4758

    
4759
class LUNodeModifyStorage(NoHooksLU):
4760
  """Logical unit for modifying a storage volume on a node.
4761

4762
  """
4763
  REQ_BGL = False
4764

    
4765
  def CheckArguments(self):
4766
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4767

    
4768
    storage_type = self.op.storage_type
4769

    
4770
    try:
4771
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
4772
    except KeyError:
4773
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
4774
                                 " modified" % storage_type,
4775
                                 errors.ECODE_INVAL)
4776

    
4777
    diff = set(self.op.changes.keys()) - modifiable
4778
    if diff:
4779
      raise errors.OpPrereqError("The following fields can not be modified for"
4780
                                 " storage units of type '%s': %r" %
4781
                                 (storage_type, list(diff)),
4782
                                 errors.ECODE_INVAL)
4783

    
4784
  def ExpandNames(self):
4785
    self.needed_locks = {
4786
      locking.LEVEL_NODE: self.op.node_name,
4787
      }
4788

    
4789
  def Exec(self, feedback_fn):
4790
    """Computes the list of nodes and their attributes.
4791

4792
    """
4793
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
4794
    result = self.rpc.call_storage_modify(self.op.node_name,
4795
                                          self.op.storage_type, st_args,
4796
                                          self.op.name, self.op.changes)
4797
    result.Raise("Failed to modify storage unit '%s' on %s" %
4798
                 (self.op.name, self.op.node_name))
4799

    
4800

    
4801
class LUNodeAdd(LogicalUnit):
4802
  """Logical unit for adding node to the cluster.
4803

4804
  """
4805
  HPATH = "node-add"
4806
  HTYPE = constants.HTYPE_NODE
4807
  _NFLAGS = ["master_capable", "vm_capable"]
4808

    
4809
  def CheckArguments(self):
4810
    self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
4811
    # validate/normalize the node name
4812
    self.hostname = netutils.GetHostname(name=self.op.node_name,
4813
                                         family=self.primary_ip_family)
4814
    self.op.node_name = self.hostname.name
4815

    
4816
    if self.op.readd and self.op.node_name == self.cfg.GetMasterNode():
4817
      raise errors.OpPrereqError("Cannot readd the master node",
4818
                                 errors.ECODE_STATE)
4819

    
4820
    if self.op.readd and self.op.group:
4821
      raise errors.OpPrereqError("Cannot pass a node group when a node is"
4822
                                 " being readded", errors.ECODE_INVAL)
4823

    
4824
  def BuildHooksEnv(self):
4825
    """Build hooks env.
4826

4827
    This will run on all nodes before, and on all nodes + the new node after.
4828

4829
    """
4830
    return {
4831
      "OP_TARGET": self.op.node_name,
4832
      "NODE_NAME": self.op.node_name,
4833
      "NODE_PIP": self.op.primary_ip,
4834
      "NODE_SIP": self.op.secondary_ip,
4835
      "MASTER_CAPABLE": str(self.op.master_capable),
4836
      "VM_CAPABLE": str(self.op.vm_capable),
4837
      }
4838

    
4839
  def BuildHooksNodes(self):
4840
    """Build hooks nodes.
4841

4842
    """
4843
    # Exclude added node
4844
    pre_nodes = list(set(self.cfg.GetNodeList()) - set([self.op.node_name]))
4845
    post_nodes = pre_nodes + [self.op.node_name, ]
4846

    
4847
    return (pre_nodes, post_nodes)
4848

    
4849
  def CheckPrereq(self):
4850
    """Check prerequisites.
4851

4852
    This checks:
4853
     - the new node is not already in the config
4854
     - it is resolvable
4855
     - its parameters (single/dual homed) matches the cluster
4856

4857
    Any errors are signaled by raising errors.OpPrereqError.
4858

4859
    """
4860
    cfg = self.cfg
4861
    hostname = self.hostname
4862
    node = hostname.name
4863
    primary_ip = self.op.primary_ip = hostname.ip
4864
    if self.op.secondary_ip is None:
4865
      if self.primary_ip_family == netutils.IP6Address.family:
4866
        raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
4867
                                   " IPv4 address must be given as secondary",
4868
                                   errors.ECODE_INVAL)
4869
      self.op.secondary_ip = primary_ip
4870

    
4871
    secondary_ip = self.op.secondary_ip
4872
    if not netutils.IP4Address.IsValid(secondary_ip):
4873
      raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
4874
                                 " address" % secondary_ip, errors.ECODE_INVAL)
4875

    
4876
    node_list = cfg.GetNodeList()
4877
    if not self.op.readd and node in node_list:
4878
      raise errors.OpPrereqError("Node %s is already in the configuration" %
4879
                                 node, errors.ECODE_EXISTS)
4880
    elif self.op.readd and node not in node_list:
4881
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
4882
                                 errors.ECODE_NOENT)
4883

    
4884
    self.changed_primary_ip = False
4885

    
4886
    for existing_node_name, existing_node in cfg.GetMultiNodeInfo(node_list):
4887
      if self.op.readd and node == existing_node_name:
4888
        if existing_node.secondary_ip != secondary_ip:
4889
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
4890
                                     " address configuration as before",
4891
                                     errors.ECODE_INVAL)
4892
        if existing_node.primary_ip != primary_ip:
4893
          self.changed_primary_ip = True
4894

    
4895
        continue
4896

    
4897
      if (existing_node.primary_ip == primary_ip or
4898
          existing_node.secondary_ip == primary_ip or
4899
          existing_node.primary_ip == secondary_ip or
4900
          existing_node.secondary_ip == secondary_ip):
4901
        raise errors.OpPrereqError("New node ip address(es) conflict with"
4902
                                   " existing node %s" % existing_node.name,
4903
                                   errors.ECODE_NOTUNIQUE)
4904

    
4905
    # After this 'if' block, None is no longer a valid value for the
4906
    # _capable op attributes
4907
    if self.op.readd:
4908
      old_node = self.cfg.GetNodeInfo(node)
4909
      assert old_node is not None, "Can't retrieve locked node %s" % node
4910
      for attr in self._NFLAGS:
4911
        if getattr(self.op, attr) is None:
4912
          setattr(self.op, attr, getattr(old_node, attr))
4913
    else:
4914
      for attr in self._NFLAGS:
4915
        if getattr(self.op, attr) is None:
4916
          setattr(self.op, attr, True)
4917

    
4918
    if self.op.readd and not self.op.vm_capable:
4919
      pri, sec = cfg.GetNodeInstances(node)
4920
      if pri or sec:
4921
        raise errors.OpPrereqError("Node %s being re-added with vm_capable"
4922
                                   " flag set to false, but it already holds"
4923
                                   " instances" % node,
4924
                                   errors.ECODE_STATE)
4925

    
4926
    # check that the type of the node (single versus dual homed) is the
4927
    # same as for the master
4928
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
4929
    master_singlehomed = myself.secondary_ip == myself.primary_ip
4930
    newbie_singlehomed = secondary_ip == primary_ip
4931
    if master_singlehomed != newbie_singlehomed:
4932
      if master_singlehomed:
4933
        raise errors.OpPrereqError("The master has no secondary ip but the"
4934
                                   " new node has one",
4935
                                   errors.ECODE_INVAL)
4936
      else:
4937
        raise errors.OpPrereqError("The master has a secondary ip but the"
4938
                                   " new node doesn't have one",
4939
                                   errors.ECODE_INVAL)
4940

    
4941
    # checks reachability
4942
    if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
4943
      raise errors.OpPrereqError("Node not reachable by ping",
4944
                                 errors.ECODE_ENVIRON)
4945

    
4946
    if not newbie_singlehomed:
4947
      # check reachability from my secondary ip to newbie's secondary ip
4948
      if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
4949
                           source=myself.secondary_ip):
4950
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
4951
                                   " based ping to node daemon port",
4952
                                   errors.ECODE_ENVIRON)
4953

    
4954
    if self.op.readd:
4955
      exceptions = [node]
4956
    else:
4957
      exceptions = []
4958

    
4959
    if self.op.master_capable:
4960
      self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
4961
    else:
4962
      self.master_candidate = False
4963

    
4964
    if self.op.readd:
4965
      self.new_node = old_node
4966
    else:
4967
      node_group = cfg.LookupNodeGroup(self.op.group)
4968
      self.new_node = objects.Node(name=node,
4969
                                   primary_ip=primary_ip,
4970
                                   secondary_ip=secondary_ip,
4971
                                   master_candidate=self.master_candidate,
4972
                                   offline=False, drained=False,
4973
                                   group=node_group)
4974

    
4975
    if self.op.ndparams:
4976
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
4977

    
4978
  def Exec(self, feedback_fn):
4979
    """Adds the new node to the cluster.
4980

4981
    """
4982
    new_node = self.new_node
4983
    node = new_node.name
4984

    
4985
    # We are adding a new node, so we assume it is powered
4986
    new_node.powered = True
4987

    
4988
    # for re-adds, reset the offline/drained/master-candidate flags;
4989
    # we need to reset here, otherwise offline would prevent RPC calls
4990
    # later in the procedure; this also means that if the re-add
4991
    # fails, we are left with a non-offlined, broken node
4992
    if self.op.readd:
4993
      new_node.drained = new_node.offline = False # pylint: disable=W0201
4994
      self.LogInfo("Readding a node, the offline/drained flags were reset")
4995
      # if we demote the node, we do cleanup later in the procedure
4996
      new_node.master_candidate = self.master_candidate
4997
      if self.changed_primary_ip:
4998
        new_node.primary_ip = self.op.primary_ip
4999

    
5000
    # copy the master/vm_capable flags
5001
    for attr in self._NFLAGS:
5002
      setattr(new_node, attr, getattr(self.op, attr))
5003

    
5004
    # notify the user about any possible mc promotion
5005
    if new_node.master_candidate:
5006
      self.LogInfo("Node will be a master candidate")
5007

    
5008
    if self.op.ndparams:
5009
      new_node.ndparams = self.op.ndparams
5010
    else:
5011
      new_node.ndparams = {}
5012

    
5013
    # check connectivity
5014
    result = self.rpc.call_version([node])[node]
5015
    result.Raise("Can't get version information from node %s" % node)
5016
    if constants.PROTOCOL_VERSION == result.payload:
5017
      logging.info("Communication to node %s fine, sw version %s match",
5018
                   node, result.payload)
5019
    else:
5020
      raise errors.OpExecError("Version mismatch master version %s,"
5021
                               " node version %s" %
5022
                               (constants.PROTOCOL_VERSION, result.payload))
5023

    
5024
    # Add node to our /etc/hosts, and add key to known_hosts
5025
    if self.cfg.GetClusterInfo().modify_etc_hosts:
5026
      master_node = self.cfg.GetMasterNode()
5027
      result = self.rpc.call_etc_hosts_modify(master_node,
5028
                                              constants.ETC_HOSTS_ADD,
5029
                                              self.hostname.name,
5030
                                              self.hostname.ip)
5031
      result.Raise("Can't update hosts file with new host data")
5032

    
5033
    if new_node.secondary_ip != new_node.primary_ip:
5034
      _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
5035
                               False)
5036

    
5037
    node_verify_list = [self.cfg.GetMasterNode()]
5038
    node_verify_param = {
5039
      constants.NV_NODELIST: [node],
5040
      # TODO: do a node-net-test as well?
5041
    }
5042

    
5043
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
5044
                                       self.cfg.GetClusterName())
5045
    for verifier in node_verify_list:
5046
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
5047
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
5048
      if nl_payload:
5049
        for failed in nl_payload:
5050
          feedback_fn("ssh/hostname verification failed"
5051
                      " (checking from %s): %s" %
5052
                      (verifier, nl_payload[failed]))
5053
        raise errors.OpExecError("ssh/hostname verification failed")
5054

    
5055
    if self.op.readd:
5056
      _RedistributeAncillaryFiles(self)
5057
      self.context.ReaddNode(new_node)
5058
      # make sure we redistribute the config
5059
      self.cfg.Update(new_node, feedback_fn)
5060
      # and make sure the new node will not have old files around
5061
      if not new_node.master_candidate:
5062
        result = self.rpc.call_node_demote_from_mc(new_node.name)
5063
        msg = result.fail_msg
5064
        if msg:
5065
          self.LogWarning("Node failed to demote itself from master"
5066
                          " candidate status: %s" % msg)
5067
    else:
5068
      _RedistributeAncillaryFiles(self, additional_nodes=[node],
5069
                                  additional_vm=self.op.vm_capable)
5070
      self.context.AddNode(new_node, self.proc.GetECId())
5071

    
5072

    
5073
class LUNodeSetParams(LogicalUnit):
5074
  """Modifies the parameters of a node.
5075

5076
  @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
5077
      to the node role (as _ROLE_*)
5078
  @cvar _R2F: a dictionary from node role to tuples of flags
5079
  @cvar _FLAGS: a list of attribute names corresponding to the flags
5080

5081
  """
5082
  HPATH = "node-modify"
5083
  HTYPE = constants.HTYPE_NODE
5084
  REQ_BGL = False
5085
  (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
5086
  _F2R = {
5087
    (True, False, False): _ROLE_CANDIDATE,
5088
    (False, True, False): _ROLE_DRAINED,
5089
    (False, False, True): _ROLE_OFFLINE,
5090
    (False, False, False): _ROLE_REGULAR,
5091
    }
5092
  _R2F = dict((v, k) for k, v in _F2R.items())
5093
  _FLAGS = ["master_candidate", "drained", "offline"]
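  # Clarifying example (added, not in the original): a healthy master
  # candidate carries the flag tuple (master_candidate=True, drained=False,
  # offline=False), which _F2R maps to _ROLE_CANDIDATE; _R2F inverts the
  # mapping so that, once a new role has been decided, the matching flag
  # values can be written back to the node object in Exec().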
5094

    
5095
  def CheckArguments(self):
5096
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5097
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
5098
                self.op.master_capable, self.op.vm_capable,
5099
                self.op.secondary_ip, self.op.ndparams]
5100
    if all_mods.count(None) == len(all_mods):
5101
      raise errors.OpPrereqError("Please pass at least one modification",
5102
                                 errors.ECODE_INVAL)
5103
    if all_mods.count(True) > 1:
5104
      raise errors.OpPrereqError("Can't set the node into more than one"
5105
                                 " state at the same time",
5106
                                 errors.ECODE_INVAL)
5107

    
5108
    # Boolean value that tells us whether we might be demoting from MC
5109
    self.might_demote = (self.op.master_candidate == False or
5110
                         self.op.offline == True or
5111
                         self.op.drained == True or
5112
                         self.op.master_capable == False)
5113

    
5114
    if self.op.secondary_ip:
5115
      if not netutils.IP4Address.IsValid(self.op.secondary_ip):
5116
        raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
5117
                                   " address" % self.op.secondary_ip,
5118
                                   errors.ECODE_INVAL)
5119

    
5120
    self.lock_all = self.op.auto_promote and self.might_demote
5121
    self.lock_instances = self.op.secondary_ip is not None
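    # Clarifying note (added): lock_all is needed because a possible demotion
    # combined with the auto_promote option means _AdjustCandidatePool() may
    # have to promote another node, which requires holding all node locks;
    # lock_instances is needed because a secondary IP change has to be
    # validated against the instances whose disks live on this node.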
5122

    
5123
  def ExpandNames(self):
5124
    if self.lock_all:
5125
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
5126
    else:
5127
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
5128

    
5129
    if self.lock_instances:
5130
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
5131

    
5132
  def DeclareLocks(self, level):
5133
    # If we have locked all instances, before waiting to lock nodes, release
5134
    # all the ones living on nodes unrelated to the current operation.
5135
    if level == locking.LEVEL_NODE and self.lock_instances:
5136
      self.affected_instances = []
5137
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
5138
        instances_keep = []
5139

    
5140
        # Build list of instances to release
5141
        locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
5142
        for instance_name, instance in self.cfg.GetMultiInstanceInfo(locked_i):
5143
          if (instance.disk_template in constants.DTS_INT_MIRROR and
5144
              self.op.node_name in instance.all_nodes):
5145
            instances_keep.append(instance_name)
5146
            self.affected_instances.append(instance)
5147

    
5148
        _ReleaseLocks(self, locking.LEVEL_INSTANCE, keep=instances_keep)
5149

    
5150
        assert (set(self.owned_locks(locking.LEVEL_INSTANCE)) ==
5151
                set(instances_keep))
5152

    
5153
  def BuildHooksEnv(self):
5154
    """Build hooks env.
5155

5156
    This runs on the master node.
5157

5158
    """
5159
    return {
5160
      "OP_TARGET": self.op.node_name,
5161
      "MASTER_CANDIDATE": str(self.op.master_candidate),
5162
      "OFFLINE": str(self.op.offline),
5163
      "DRAINED": str(self.op.drained),
5164
      "MASTER_CAPABLE": str(self.op.master_capable),
5165
      "VM_CAPABLE": str(self.op.vm_capable),
5166
      }
5167

    
5168
  def BuildHooksNodes(self):
5169
    """Build hooks nodes.
5170

5171
    """
5172
    nl = [self.cfg.GetMasterNode(), self.op.node_name]
5173
    return (nl, nl)
5174

    
5175
  def CheckPrereq(self):
5176
    """Check prerequisites.
5177

5178
    This only checks the instance list against the existing names.
5179

5180
    """
5181
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
5182

    
5183
    if (self.op.master_candidate is not None or
5184
        self.op.drained is not None or
5185
        self.op.offline is not None):
5186
      # we can't change the master's node flags
5187
      if self.op.node_name == self.cfg.GetMasterNode():
5188
        raise errors.OpPrereqError("The master role can be changed"
5189
                                   " only via master-failover",
5190
                                   errors.ECODE_INVAL)
5191

    
5192
    if self.op.master_candidate and not node.master_capable:
5193
      raise errors.OpPrereqError("Node %s is not master capable, cannot make"
5194
                                 " it a master candidate" % node.name,
5195
                                 errors.ECODE_STATE)
5196

    
5197
    if self.op.vm_capable == False:
5198
      (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
5199
      if ipri or isec:
5200
        raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
5201
                                   " the vm_capable flag" % node.name,
5202
                                   errors.ECODE_STATE)
5203

    
5204
    if node.master_candidate and self.might_demote and not self.lock_all:
5205
      assert not self.op.auto_promote, "auto_promote set but lock_all not"
5206
      # check if after removing the current node, we're missing master
5207
      # candidates
5208
      (mc_remaining, mc_should, _) = \
5209
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
5210
      if mc_remaining < mc_should:
5211
        raise errors.OpPrereqError("Not enough master candidates, please"
5212
                                   " pass auto promote option to allow"
5213
                                   " promotion", errors.ECODE_STATE)
5214

    
5215
    self.old_flags = old_flags = (node.master_candidate,
5216
                                  node.drained, node.offline)
5217
    assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
5218
    self.old_role = old_role = self._F2R[old_flags]
5219

    
5220
    # Check for ineffective changes
5221
    for attr in self._FLAGS:
5222
      if (getattr(self.op, attr) == False and getattr(node, attr) == False):
5223
        self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
5224
        setattr(self.op, attr, None)
5225

    
5226
    # Past this point, any flag change to False means a transition
5227
    # away from the respective state, as only real changes are kept
5228

    
5229
    # TODO: We might query the real power state if it supports OOB
5230
    if _SupportsOob(self.cfg, node):
5231
      if self.op.offline is False and not (node.powered or
5232
                                           self.op.powered == True):
5233
        raise errors.OpPrereqError(("Node %s needs to be turned on before its"
5234
                                    " offline status can be reset") %
5235
                                   self.op.node_name)
5236
    elif self.op.powered is not None:
5237
      raise errors.OpPrereqError(("Unable to change powered state for node %s"
5238
                                  " as it does not support out-of-band"
5239
                                  " handling") % self.op.node_name)
5240

    
5241
    # If we're being deofflined/drained, we'll MC ourself if needed
5242
    if (self.op.drained == False or self.op.offline == False or
5243
        (self.op.master_capable and not node.master_capable)):
5244
      if _DecideSelfPromotion(self):
5245
        self.op.master_candidate = True
5246
        self.LogInfo("Auto-promoting node to master candidate")
5247

    
5248
    # If we're no longer master capable, we'll demote ourselves from MC
5249
    if self.op.master_capable == False and node.master_candidate:
5250
      self.LogInfo("Demoting from master candidate")
5251
      self.op.master_candidate = False
5252

    
5253
    # Compute new role
5254
    assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
5255
    if self.op.master_candidate:
5256
      new_role = self._ROLE_CANDIDATE
5257
    elif self.op.drained:
5258
      new_role = self._ROLE_DRAINED
5259
    elif self.op.offline:
5260
      new_role = self._ROLE_OFFLINE
5261
    elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
5262
      # False is still in new flags, which means we're un-setting (the
5263
      # only) True flag
5264
      new_role = self._ROLE_REGULAR
5265
    else: # no new flags, nothing, keep old role
5266
      new_role = old_role
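    # Worked example (added for clarity): a request that only passes
    # drained=False for a currently drained node reaches this point with
    # master_candidate and offline still None, so the "False in [...]"
    # branch above matches and the node drops back to _ROLE_REGULAR.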
5267

    
5268
    self.new_role = new_role
5269

    
5270
    if old_role == self._ROLE_OFFLINE and new_role != old_role:
5271
      # Trying to transition out of offline status
5272
      result = self.rpc.call_version([node.name])[node.name]
5273
      if result.fail_msg:
5274
        raise errors.OpPrereqError("Node %s is being de-offlined but fails"
5275
                                   " to report its version: %s" %
5276
                                   (node.name, result.fail_msg),
5277
                                   errors.ECODE_STATE)
5278
      else:
5279
        self.LogWarning("Transitioning node from offline to online state"
5280
                        " without using re-add. Please make sure the node"
5281
                        " is healthy!")
5282

    
5283
    if self.op.secondary_ip:
5284
      # Ok even without locking, because this can't be changed by any LU
5285
      master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
5286
      master_singlehomed = master.secondary_ip == master.primary_ip
5287
      if master_singlehomed and self.op.secondary_ip:
5288
        raise errors.OpPrereqError("Cannot change the secondary ip on a single"
5289
                                   " homed cluster", errors.ECODE_INVAL)
5290

    
5291
      if node.offline:
5292
        if self.affected_instances:
5293
          raise errors.OpPrereqError("Cannot change secondary ip: offline"
5294
                                     " node has instances (%s) configured"
5295
                                     " to use it" % self.affected_instances)
5296
      else:
5297
        # On online nodes, check that no instances are running, and that
5298
        # the node has the new ip and we can reach it.
5299
        for instance in self.affected_instances:
5300
          _CheckInstanceDown(self, instance, "cannot change secondary ip")
5301

    
5302
        _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
5303
        if master.name != node.name:
5304
          # check reachability from master secondary ip to new secondary ip
5305
          if not netutils.TcpPing(self.op.secondary_ip,
5306
                                  constants.DEFAULT_NODED_PORT,
5307
                                  source=master.secondary_ip):
5308
            raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
5309
                                       " based ping to node daemon port",
5310
                                       errors.ECODE_ENVIRON)
5311

    
5312
    if self.op.ndparams:
5313
      new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
5314
      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
5315
      self.new_ndparams = new_ndparams
5316

    
5317
  def Exec(self, feedback_fn):
5318
    """Modifies a node.
5319

5320
    """
5321
    node = self.node
5322
    old_role = self.old_role
5323
    new_role = self.new_role
5324

    
5325
    result = []
5326

    
5327
    if self.op.ndparams:
5328
      node.ndparams = self.new_ndparams
5329

    
5330
    if self.op.powered is not None:
5331
      node.powered = self.op.powered
5332

    
5333
    for attr in ["master_capable", "vm_capable"]:
5334
      val = getattr(self.op, attr)
5335
      if val is not None:
5336
        setattr(node, attr, val)
5337
        result.append((attr, str(val)))
5338

    
5339
    if new_role != old_role:
5340
      # Tell the node to demote itself, if no longer MC and not offline
5341
      if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
5342
        msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
5343
        if msg:
5344
          self.LogWarning("Node failed to demote itself: %s", msg)
5345

    
5346
      new_flags = self._R2F[new_role]
5347
      for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
5348
        if of != nf:
5349
          result.append((desc, str(nf)))
5350
      (node.master_candidate, node.drained, node.offline) = new_flags
5351

    
5352
      # we locked all nodes, we adjust the CP before updating this node
5353
      if self.lock_all:
5354
        _AdjustCandidatePool(self, [node.name])
5355

    
5356
    if self.op.secondary_ip:
5357
      node.secondary_ip = self.op.secondary_ip
5358
      result.append(("secondary_ip", self.op.secondary_ip))
5359

    
5360
    # this will trigger configuration file update, if needed
5361
    self.cfg.Update(node, feedback_fn)
5362

    
5363
    # this will trigger job queue propagation or cleanup if the mc
5364
    # flag changed
5365
    if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
5366
      self.context.ReaddNode(node)
5367

    
5368
    return result
5369

    
5370

    
5371
class LUNodePowercycle(NoHooksLU):
5372
  """Powercycles a node.
5373

5374
  """
5375
  REQ_BGL = False
5376

    
5377
  def CheckArguments(self):
5378
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5379
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
5380
      raise errors.OpPrereqError("The node is the master and the force"
5381
                                 " parameter was not set",
5382
                                 errors.ECODE_INVAL)
5383

    
5384
  def ExpandNames(self):
5385
    """Locking for PowercycleNode.
5386

5387
    This is a last-resort option and shouldn't block on other
5388
    jobs. Therefore, we grab no locks.
5389

5390
    """
5391
    self.needed_locks = {}
5392

    
5393
  def Exec(self, feedback_fn):
5394
    """Reboots a node.
5395

5396
    """
5397
    result = self.rpc.call_node_powercycle(self.op.node_name,
5398
                                           self.cfg.GetHypervisorType())
5399
    result.Raise("Failed to schedule the reboot")
5400
    return result.payload
5401

    
5402

    
5403
class LUClusterQuery(NoHooksLU):
5404
  """Query cluster configuration.
5405

5406
  """
5407
  REQ_BGL = False
5408

    
5409
  def ExpandNames(self):
5410
    self.needed_locks = {}
5411

    
5412
  def Exec(self, feedback_fn):
5413
    """Return cluster config.
5414

5415
    """
5416
    cluster = self.cfg.GetClusterInfo()
5417
    os_hvp = {}
5418

    
5419
    # Filter just for enabled hypervisors
5420
    for os_name, hv_dict in cluster.os_hvp.items():
5421
      os_hvp[os_name] = {}
5422
      for hv_name, hv_params in hv_dict.items():
5423
        if hv_name in cluster.enabled_hypervisors:
5424
          os_hvp[os_name][hv_name] = hv_params
5425

    
5426
    # Convert ip_family to ip_version
5427
    primary_ip_version = constants.IP4_VERSION
5428
    if cluster.primary_ip_family == netutils.IP6Address.family:
5429
      primary_ip_version = constants.IP6_VERSION
5430

    
5431
    result = {
5432
      "software_version": constants.RELEASE_VERSION,
5433
      "protocol_version": constants.PROTOCOL_VERSION,
5434
      "config_version": constants.CONFIG_VERSION,
5435
      "os_api_version": max(constants.OS_API_VERSIONS),
5436
      "export_version": constants.EXPORT_VERSION,
5437
      "architecture": (platform.architecture()[0], platform.machine()),
5438
      "name": cluster.cluster_name,
5439
      "master": cluster.master_node,
5440
      "default_hypervisor": cluster.enabled_hypervisors[0],
5441
      "enabled_hypervisors": cluster.enabled_hypervisors,
5442
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
5443
                        for hypervisor_name in cluster.enabled_hypervisors]),
5444
      "os_hvp": os_hvp,
5445
      "beparams": cluster.beparams,
5446
      "osparams": cluster.osparams,
5447
      "nicparams": cluster.nicparams,
5448
      "ndparams": cluster.ndparams,
5449
      "candidate_pool_size": cluster.candidate_pool_size,
5450
      "master_netdev": cluster.master_netdev,
5451
      "volume_group_name": cluster.volume_group_name,
5452
      "drbd_usermode_helper": cluster.drbd_usermode_helper,
5453
      "file_storage_dir": cluster.file_storage_dir,
5454
      "shared_file_storage_dir": cluster.shared_file_storage_dir,
5455
      "maintain_node_health": cluster.maintain_node_health,
5456
      "ctime": cluster.ctime,
5457
      "mtime": cluster.mtime,
5458
      "uuid": cluster.uuid,
5459
      "tags": list(cluster.GetTags()),
5460
      "uid_pool": cluster.uid_pool,
5461
      "default_iallocator": cluster.default_iallocator,
5462
      "reserved_lvs": cluster.reserved_lvs,
5463
      "primary_ip_version": primary_ip_version,
5464
      "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
5465
      "hidden_os": cluster.hidden_os,
5466
      "blacklisted_os": cluster.blacklisted_os,
5467
      }
5468

    
5469
    return result
5470

    
5471

    
5472
class LUClusterConfigQuery(NoHooksLU):
5473
  """Return configuration values.
5474

5475
  """
5476
  REQ_BGL = False
5477
  _FIELDS_DYNAMIC = utils.FieldSet()
5478
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
5479
                                  "watcher_pause", "volume_group_name")
5480

    
5481
  def CheckArguments(self):
5482
    _CheckOutputFields(static=self._FIELDS_STATIC,
5483
                       dynamic=self._FIELDS_DYNAMIC,
5484
                       selected=self.op.output_fields)
5485

    
5486
  def ExpandNames(self):
5487
    self.needed_locks = {}
5488

    
5489
  def Exec(self, feedback_fn):
5490
    """Dump a representation of the cluster config to the standard output.
5491

5492
    """
5493
    values = []
5494
    for field in self.op.output_fields:
5495
      if field == "cluster_name":
5496
        entry = self.cfg.GetClusterName()
5497
      elif field == "master_node":
5498
        entry = self.cfg.GetMasterNode()
5499
      elif field == "drain_flag":
5500
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
5501
      elif field == "watcher_pause":
5502
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
5503
      elif field == "volume_group_name":
5504
        entry = self.cfg.GetVGName()
5505
      else:
5506
        raise errors.ParameterError(field)
5507
      values.append(entry)
5508
    return values
5509

    
5510

    
5511
class LUInstanceActivateDisks(NoHooksLU):
5512
  """Bring up an instance's disks.
5513

5514
  """
5515
  REQ_BGL = False
5516

    
5517
  def ExpandNames(self):
5518
    self._ExpandAndLockInstance()
5519
    self.needed_locks[locking.LEVEL_NODE] = []
5520
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5521

    
5522
  def DeclareLocks(self, level):
5523
    if level == locking.LEVEL_NODE:
5524
      self._LockInstancesNodes()
5525

    
5526
  def CheckPrereq(self):
5527
    """Check prerequisites.
5528

5529
    This checks that the instance is in the cluster.
5530

5531
    """
5532
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5533
    assert self.instance is not None, \
5534
      "Cannot retrieve locked instance %s" % self.op.instance_name
5535
    _CheckNodeOnline(self, self.instance.primary_node)
5536

    
5537
  def Exec(self, feedback_fn):
5538
    """Activate the disks.
5539

5540
    """
5541
    disks_ok, disks_info = \
5542
              _AssembleInstanceDisks(self, self.instance,
5543
                                     ignore_size=self.op.ignore_size)
5544
    if not disks_ok:
5545
      raise errors.OpExecError("Cannot activate block devices")
5546

    
5547
    return disks_info
5548

    
5549

    
5550
def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
5551
                           ignore_size=False):
5552
  """Prepare the block devices for an instance.
5553

5554
  This sets up the block devices on all nodes.
5555

5556
  @type lu: L{LogicalUnit}
5557
  @param lu: the logical unit on whose behalf we execute
5558
  @type instance: L{objects.Instance}
5559
  @param instance: the instance for whose disks we assemble
5560
  @type disks: list of L{objects.Disk} or None
5561
  @param disks: which disks to assemble (or all, if None)
5562
  @type ignore_secondaries: boolean
5563
  @param ignore_secondaries: if true, errors on secondary nodes
5564
      won't result in an error return from the function
5565
  @type ignore_size: boolean
5566
  @param ignore_size: if true, the current known size of the disk
5567
      will not be used during the disk activation, useful for cases
5568
      when the size is wrong
5569
  @return: a tuple of (disks_ok, device_info); disks_ok is False if the
      operation failed, otherwise device_info is a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices
5572

5573
  """
5574
  device_info = []
5575
  disks_ok = True
5576
  iname = instance.name
5577
  disks = _ExpandCheckDisks(instance, disks)
5578

    
5579
  # With the two passes mechanism we try to reduce the window of
5580
  # opportunity for the race condition of switching DRBD to primary
5581
  # before handshaking has occurred, but we do not eliminate it
5582

    
5583
  # The proper fix would be to wait (with some limits) until the
5584
  # connection has been made and drbd transitions from WFConnection
5585
  # into any other network-connected state (Connected, SyncTarget,
5586
  # SyncSource, etc.)
5587

    
5588
  # 1st pass, assemble on all nodes in secondary mode
5589
  for idx, inst_disk in enumerate(disks):
5590
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
5591
      if ignore_size:
5592
        node_disk = node_disk.Copy()
5593
        node_disk.UnsetSize()
5594
      lu.cfg.SetDiskID(node_disk, node)
5595
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False, idx)
5596
      msg = result.fail_msg
5597
      if msg:
5598
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
5599
                           " (is_primary=False, pass=1): %s",
5600
                           inst_disk.iv_name, node, msg)
5601
        if not ignore_secondaries:
5602
          disks_ok = False
5603

    
5604
  # FIXME: race condition on drbd migration to primary
5605

    
5606
  # 2nd pass, do only the primary node
5607
  for idx, inst_disk in enumerate(disks):
5608
    dev_path = None
5609

    
5610
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
5611
      if node != instance.primary_node:
5612
        continue
5613
      if ignore_size:
5614
        node_disk = node_disk.Copy()
5615
        node_disk.UnsetSize()
5616
      lu.cfg.SetDiskID(node_disk, node)
5617
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True, idx)
5618
      msg = result.fail_msg
5619
      if msg:
5620
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
5621
                           " (is_primary=True, pass=2): %s",
5622
                           inst_disk.iv_name, node, msg)
5623
        disks_ok = False
5624
      else:
5625
        dev_path = result.payload
5626

    
5627
    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
5628

    
5629
  # leave the disks configured for the primary node
5630
  # this is a workaround that would be fixed better by
5631
  # improving the logical/physical id handling
5632
  for disk in disks:
5633
    lu.cfg.SetDiskID(disk, instance.primary_node)
5634

    
5635
  return disks_ok, device_info
5636

    
5637

    
5638
def _StartInstanceDisks(lu, instance, force):
5639
  """Start the disks of an instance.
5640

5641
  """
5642
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
5643
                                           ignore_secondaries=force)
5644
  if not disks_ok:
5645
    _ShutdownInstanceDisks(lu, instance)
5646
    if force is not None and not force:
5647
      lu.proc.LogWarning("", hint="If the message above refers to a"
5648
                         " secondary node,"
5649
                         " you can retry the operation using '--force'.")
5650
    raise errors.OpExecError("Disk consistency error")
5651

    
5652

    
5653
class LUInstanceDeactivateDisks(NoHooksLU):
5654
  """Shutdown an instance's disks.
5655

5656
  """
5657
  REQ_BGL = False
5658

    
5659
  def ExpandNames(self):
5660
    self._ExpandAndLockInstance()
5661
    self.needed_locks[locking.LEVEL_NODE] = []
5662
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5663

    
5664
  def DeclareLocks(self, level):
5665
    if level == locking.LEVEL_NODE:
5666
      self._LockInstancesNodes()
5667

    
5668
  def CheckPrereq(self):
5669
    """Check prerequisites.
5670

5671
    This checks that the instance is in the cluster.
5672

5673
    """
5674
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5675
    assert self.instance is not None, \
5676
      "Cannot retrieve locked instance %s" % self.op.instance_name
5677

    
5678
  def Exec(self, feedback_fn):
5679
    """Deactivate the disks
5680

5681
    """
5682
    instance = self.instance
5683
    if self.op.force:
5684
      _ShutdownInstanceDisks(self, instance)
5685
    else:
5686
      _SafeShutdownInstanceDisks(self, instance)
5687

    
5688

    
5689
def _SafeShutdownInstanceDisks(lu, instance, disks=None):
5690
  """Shutdown block devices of an instance.
5691

5692
  This function checks if an instance is running, before calling
5693
  _ShutdownInstanceDisks.
5694

5695
  """
5696
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
5697
  _ShutdownInstanceDisks(lu, instance, disks=disks)
5698

    
5699

    
5700
def _ExpandCheckDisks(instance, disks):
5701
  """Return the instance disks selected by the disks list
5702

5703
  @type disks: list of L{objects.Disk} or None
5704
  @param disks: selected disks
5705
  @rtype: list of L{objects.Disk}
5706
  @return: selected instance disks to act on
5707

5708
  """
5709
  if disks is None:
5710
    return instance.disks
5711
  else:
5712
    if not set(disks).issubset(instance.disks):
5713
      raise errors.ProgrammerError("Can only act on disks belonging to the"
5714
                                   " target instance")
5715
    return disks
5716

    
5717

    
5718
def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
5719
  """Shutdown block devices of an instance.
5720

5721
  This does the shutdown on all nodes of the instance.
5722

5723
  If the ignore_primary is false, errors on the primary node are
5724
  ignored.
5725

5726
  """
5727
  all_result = True
5728
  disks = _ExpandCheckDisks(instance, disks)
5729

    
5730
  for disk in disks:
5731
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
5732
      lu.cfg.SetDiskID(top_disk, node)
5733
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
5734
      msg = result.fail_msg
5735
      if msg:
5736
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
5737
                      disk.iv_name, node, msg)
5738
        if ((node == instance.primary_node and not ignore_primary) or
5739
            (node != instance.primary_node and not result.offline)):
5740
          all_result = False
5741
  return all_result
5742

    
5743

    
5744
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
5745
  """Checks if a node has enough free memory.
5746

5747
  This function check if a given node has the needed amount of free
5748
  memory. In case the node has less memory or we cannot get the
5749
  information from the node, this function raise an OpPrereqError
5750
  exception.
5751

5752
  @type lu: C{LogicalUnit}
5753
  @param lu: a logical unit from which we get configuration data
5754
  @type node: C{str}
5755
  @param node: the node to check
5756
  @type reason: C{str}
5757
  @param reason: string to use in the error message
5758
  @type requested: C{int}
5759
  @param requested: the amount of memory in MiB to check for
5760
  @type hypervisor_name: C{str}
5761
  @param hypervisor_name: the hypervisor to ask for memory stats
5762
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
5763
      we cannot check the node
5764

5765
  """
5766
  nodeinfo = lu.rpc.call_node_info([node], None, hypervisor_name)
5767
  nodeinfo[node].Raise("Can't get data from node %s" % node,
5768
                       prereq=True, ecode=errors.ECODE_ENVIRON)
5769
  free_mem = nodeinfo[node].payload.get("memory_free", None)
5770
  if not isinstance(free_mem, int):
5771
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
5772
                               " was '%s'" % (node, free_mem),
5773
                               errors.ECODE_ENVIRON)
5774
  if requested > free_mem:
5775
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
5776
                               " needed %s MiB, available %s MiB" %
5777
                               (node, reason, requested, free_mem),
5778
                               errors.ECODE_NORES)
5779

    
5780

    
5781
def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
5782
  """Checks if nodes have enough free disk space in the all VGs.
5783

5784
  This function check if all given nodes have the needed amount of
5785
  free disk. In case any node has less disk or we cannot get the
5786
  information from the node, this function raise an OpPrereqError
5787
  exception.
5788

5789
  @type lu: C{LogicalUnit}
5790
  @param lu: a logical unit from which we get configuration data
5791
  @type nodenames: C{list}
5792
  @param nodenames: the list of node names to check
5793
  @type req_sizes: C{dict}
5794
  @param req_sizes: the hash of vg and corresponding amount of disk in
5795
      MiB to check for
5796
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
5797
      or we cannot check the node
5798

5799
  """
5800
  for vg, req_size in req_sizes.items():
5801
    _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
5802

    
5803

    
5804
def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
5805
  """Checks if nodes have enough free disk space in the specified VG.
5806

5807
  This function check if all given nodes have the needed amount of
5808
  free disk. In case any node has less disk or we cannot get the
5809
  information from the node, this function raise an OpPrereqError
5810
  exception.
5811

5812
  @type lu: C{LogicalUnit}
5813
  @param lu: a logical unit from which we get configuration data
5814
  @type nodenames: C{list}
5815
  @param nodenames: the list of node names to check
5816
  @type vg: C{str}
5817
  @param vg: the volume group to check
5818
  @type requested: C{int}
5819
  @param requested: the amount of disk in MiB to check for
5820
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
5821
      or we cannot check the node
5822

5823
  """
5824
  nodeinfo = lu.rpc.call_node_info(nodenames, vg, None)
5825
  for node in nodenames:
5826
    info = nodeinfo[node]
5827
    info.Raise("Cannot get current information from node %s" % node,
5828
               prereq=True, ecode=errors.ECODE_ENVIRON)
5829
    vg_free = info.payload.get("vg_free", None)
5830
    if not isinstance(vg_free, int):
5831
      raise errors.OpPrereqError("Can't compute free disk space on node"
5832
                                 " %s for vg %s, result was '%s'" %
5833
                                 (node, vg, vg_free), errors.ECODE_ENVIRON)
5834
    if requested > vg_free:
5835
      raise errors.OpPrereqError("Not enough disk space on target node %s"
5836
                                 " vg %s: required %d MiB, available %d MiB" %
5837
                                 (node, vg, requested, vg_free),
5838
                                 errors.ECODE_NORES)
5839

    
5840

    
5841
class LUInstanceStartup(LogicalUnit):
5842
  """Starts an instance.
5843

5844
  """
5845
  HPATH = "instance-start"
5846
  HTYPE = constants.HTYPE_INSTANCE
5847
  REQ_BGL = False
5848

    
5849
  def CheckArguments(self):
5850
    # extra beparams
5851
    if self.op.beparams:
5852
      # fill the beparams dict
5853
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
5854

    
5855
  def ExpandNames(self):
5856
    self._ExpandAndLockInstance()
5857

    
5858
  def BuildHooksEnv(self):
5859
    """Build hooks env.
5860

5861
    This runs on master, primary and secondary nodes of the instance.
5862

5863
    """
5864
    env = {
5865
      "FORCE": self.op.force,
5866
      }
5867

    
5868
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5869

    
5870
    return env
5871

    
5872
  def BuildHooksNodes(self):
5873
    """Build hooks nodes.
5874

5875
    """
5876
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5877
    return (nl, nl)
5878

    
5879
  def CheckPrereq(self):
5880
    """Check prerequisites.
5881

5882
    This checks that the instance is in the cluster.
5883

5884
    """
5885
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5886
    assert self.instance is not None, \
5887
      "Cannot retrieve locked instance %s" % self.op.instance_name
5888

    
5889
    # extra hvparams
5890
    if self.op.hvparams:
5891
      # check hypervisor parameter syntax (locally)
5892
      cluster = self.cfg.GetClusterInfo()
5893
      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
5894
      filled_hvp = cluster.FillHV(instance)
5895
      filled_hvp.update(self.op.hvparams)
5896
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
5897
      hv_type.CheckParameterSyntax(filled_hvp)
5898
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
5899

    
5900
    self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
5901

    
5902
    if self.primary_offline and self.op.ignore_offline_nodes:
5903
      self.proc.LogWarning("Ignoring offline primary node")
5904

    
5905
      if self.op.hvparams or self.op.beparams:
5906
        self.proc.LogWarning("Overridden parameters are ignored")
5907
    else:
5908
      _CheckNodeOnline(self, instance.primary_node)
5909

    
5910
      bep = self.cfg.GetClusterInfo().FillBE(instance)
5911

    
5912
      # check bridges existence
5913
      _CheckInstanceBridgesExist(self, instance)
5914

    
5915
      remote_info = self.rpc.call_instance_info(instance.primary_node,
5916
                                                instance.name,
5917
                                                instance.hypervisor)
5918
      remote_info.Raise("Error checking node %s" % instance.primary_node,
5919
                        prereq=True, ecode=errors.ECODE_ENVIRON)
5920
      if not remote_info.payload: # not running already
5921
        _CheckNodeFreeMemory(self, instance.primary_node,
5922
                             "starting instance %s" % instance.name,
5923
                             bep[constants.BE_MEMORY], instance.hypervisor)
5924

    
5925
  def Exec(self, feedback_fn):
5926
    """Start the instance.
5927

5928
    """
5929
    instance = self.instance
5930
    force = self.op.force
5931

    
5932
    if not self.op.no_remember:
5933
      self.cfg.MarkInstanceUp(instance.name)
5934

    
5935
    if self.primary_offline:
5936
      assert self.op.ignore_offline_nodes
5937
      self.proc.LogInfo("Primary node offline, marked instance as started")
5938
    else:
5939
      node_current = instance.primary_node
5940

    
5941
      _StartInstanceDisks(self, instance, force)
5942

    
5943
      result = self.rpc.call_instance_start(node_current, instance,
5944
                                            self.op.hvparams, self.op.beparams,
5945
                                            self.op.startup_paused)
5946
      msg = result.fail_msg
5947
      if msg:
5948
        _ShutdownInstanceDisks(self, instance)
5949
        raise errors.OpExecError("Could not start instance: %s" % msg)
5950

    
5951

    
5952
class LUInstanceReboot(LogicalUnit):
5953
  """Reboot an instance.
5954

5955
  """
5956
  HPATH = "instance-reboot"
5957
  HTYPE = constants.HTYPE_INSTANCE
5958
  REQ_BGL = False
5959

    
5960
  def ExpandNames(self):
5961
    self._ExpandAndLockInstance()
5962

    
5963
  def BuildHooksEnv(self):
5964
    """Build hooks env.
5965

5966
    This runs on master, primary and secondary nodes of the instance.
5967

5968
    """
5969
    env = {
5970
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
5971
      "REBOOT_TYPE": self.op.reboot_type,
5972
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5973
      }
5974

    
5975
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5976

    
5977
    return env
5978

    
5979
  def BuildHooksNodes(self):
5980
    """Build hooks nodes.
5981

5982
    """
5983
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5984
    return (nl, nl)
5985

    
5986
  def CheckPrereq(self):
5987
    """Check prerequisites.
5988

5989
    This checks that the instance is in the cluster.
5990

5991
    """
5992
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5993
    assert self.instance is not None, \
5994
      "Cannot retrieve locked instance %s" % self.op.instance_name
5995

    
5996
    _CheckNodeOnline(self, instance.primary_node)
5997

    
5998
    # check bridges existence
5999
    _CheckInstanceBridgesExist(self, instance)
6000

    
6001
  def Exec(self, feedback_fn):
6002
    """Reboot the instance.
6003

6004
    """
6005
    instance = self.instance
6006
    ignore_secondaries = self.op.ignore_secondaries
6007
    reboot_type = self.op.reboot_type
6008

    
6009
    remote_info = self.rpc.call_instance_info(instance.primary_node,
6010
                                              instance.name,
6011
                                              instance.hypervisor)
6012
    remote_info.Raise("Error checking node %s" % instance.primary_node)
6013
    instance_running = bool(remote_info.payload)
6014

    
6015
    node_current = instance.primary_node
6016

    
6017
    if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
6018
                                            constants.INSTANCE_REBOOT_HARD]:
6019
      for disk in instance.disks:
6020
        self.cfg.SetDiskID(disk, node_current)
6021
      result = self.rpc.call_instance_reboot(node_current, instance,
6022
                                             reboot_type,
6023
                                             self.op.shutdown_timeout)
6024
      result.Raise("Could not reboot instance")
6025
    else:
6026
      if instance_running:
6027
        result = self.rpc.call_instance_shutdown(node_current, instance,
6028
                                                 self.op.shutdown_timeout)
6029
        result.Raise("Could not shutdown instance for full reboot")
6030
        _ShutdownInstanceDisks(self, instance)
6031
      else:
6032
        self.LogInfo("Instance %s was already stopped, starting now",
6033
                     instance.name)
6034
      _StartInstanceDisks(self, instance, ignore_secondaries)
6035
      result = self.rpc.call_instance_start(node_current, instance,
6036
                                            None, None, False)
6037
      msg = result.fail_msg
6038
      if msg:
6039
        _ShutdownInstanceDisks(self, instance)
6040
        raise errors.OpExecError("Could not start instance for"
6041
                                 " full reboot: %s" % msg)
6042

    
6043
    self.cfg.MarkInstanceUp(instance.name)
6044

    
6045

    
6046
class LUInstanceShutdown(LogicalUnit):
6047
  """Shutdown an instance.
6048

6049
  """
6050
  HPATH = "instance-stop"
6051
  HTYPE = constants.HTYPE_INSTANCE
6052
  REQ_BGL = False
6053

    
6054
  def ExpandNames(self):
6055
    self._ExpandAndLockInstance()
6056

    
6057
  def BuildHooksEnv(self):
6058
    """Build hooks env.
6059

6060
    This runs on master, primary and secondary nodes of the instance.
6061

6062
    """
6063
    env = _BuildInstanceHookEnvByObject(self, self.instance)
6064
    env["TIMEOUT"] = self.op.timeout
6065
    return env
6066

    
6067
  def BuildHooksNodes(self):
6068
    """Build hooks nodes.
6069

6070
    """
6071
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6072
    return (nl, nl)
6073

    
6074
  def CheckPrereq(self):
6075
    """Check prerequisites.
6076

6077
    This checks that the instance is in the cluster.
6078

6079
    """
6080
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6081
    assert self.instance is not None, \
6082
      "Cannot retrieve locked instance %s" % self.op.instance_name
6083

    
6084
    self.primary_offline = \
6085
      self.cfg.GetNodeInfo(self.instance.primary_node).offline
6086

    
6087
    if self.primary_offline and self.op.ignore_offline_nodes:
6088
      self.proc.LogWarning("Ignoring offline primary node")
6089
    else:
6090
      _CheckNodeOnline(self, self.instance.primary_node)
6091

    
6092
  def Exec(self, feedback_fn):
6093
    """Shutdown the instance.
6094

6095
    """
6096
    instance = self.instance
6097
    node_current = instance.primary_node
6098
    timeout = self.op.timeout
6099

    
6100
    if not self.op.no_remember:
6101
      self.cfg.MarkInstanceDown(instance.name)
6102

    
6103
    if self.primary_offline:
6104
      assert self.op.ignore_offline_nodes
6105
      self.proc.LogInfo("Primary node offline, marked instance as stopped")
6106
    else:
6107
      result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
6108
      msg = result.fail_msg
6109
      if msg:
6110
        self.proc.LogWarning("Could not shutdown instance: %s" % msg)
6111

    
6112
      _ShutdownInstanceDisks(self, instance)
6113

    
6114

    
6115
class LUInstanceReinstall(LogicalUnit):
6116
  """Reinstall an instance.
6117

6118
  """
6119
  HPATH = "instance-reinstall"
6120
  HTYPE = constants.HTYPE_INSTANCE
6121
  REQ_BGL = False
6122

    
6123
  def ExpandNames(self):
6124
    self._ExpandAndLockInstance()
6125

    
6126
  def BuildHooksEnv(self):
6127
    """Build hooks env.
6128

6129
    This runs on master, primary and secondary nodes of the instance.
6130

6131
    """
6132
    return _BuildInstanceHookEnvByObject(self, self.instance)
6133

    
6134
  def BuildHooksNodes(self):
6135
    """Build hooks nodes.
6136

6137
    """
6138
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6139
    return (nl, nl)
6140

    
6141
  def CheckPrereq(self):
6142
    """Check prerequisites.
6143

6144
    This checks that the instance is in the cluster and is not running.
6145

6146
    """
6147
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6148
    assert instance is not None, \
6149
      "Cannot retrieve locked instance %s" % self.op.instance_name
6150
    _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
6151
                     " offline, cannot reinstall")
6152
    for node in instance.secondary_nodes:
6153
      _CheckNodeOnline(self, node, "Instance secondary node offline,"
6154
                       " cannot reinstall")
6155

    
6156
    if instance.disk_template == constants.DT_DISKLESS:
6157
      raise errors.OpPrereqError("Instance '%s' has no disks" %
6158
                                 self.op.instance_name,
6159
                                 errors.ECODE_INVAL)
6160
    _CheckInstanceDown(self, instance, "cannot reinstall")
6161

    
6162
    if self.op.os_type is not None:
6163
      # OS verification
6164
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
6165
      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
6166
      instance_os = self.op.os_type
6167
    else:
6168
      instance_os = instance.os
6169

    
6170
    nodelist = list(instance.all_nodes)
6171

    
6172
    if self.op.osparams:
6173
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
6174
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
6175
      self.os_inst = i_osdict # the new dict (without defaults)
6176
    else:
6177
      self.os_inst = None
6178

    
6179
    self.instance = instance
6180

    
6181
  def Exec(self, feedback_fn):
6182
    """Reinstall the instance.
6183

6184
    """
6185
    inst = self.instance
6186

    
6187
    if self.op.os_type is not None:
6188
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
6189
      inst.os = self.op.os_type
6190
      # Write to configuration
6191
      self.cfg.Update(inst, feedback_fn)
6192

    
6193
    _StartInstanceDisks(self, inst, None)
6194
    try:
6195
      feedback_fn("Running the instance OS create scripts...")
6196
      # FIXME: pass debug option from opcode to backend
6197
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
6198
                                             self.op.debug_level,
6199
                                             osparams=self.os_inst)
6200
      result.Raise("Could not install OS for instance %s on node %s" %
6201
                   (inst.name, inst.primary_node))
6202
    finally:
6203
      _ShutdownInstanceDisks(self, inst)
6204

    
6205

    
6206
class LUInstanceRecreateDisks(LogicalUnit):
6207
  """Recreate an instance's missing disks.
6208

6209
  """
6210
  HPATH = "instance-recreate-disks"
6211
  HTYPE = constants.HTYPE_INSTANCE
6212
  REQ_BGL = False
6213

    
6214
  def CheckArguments(self):
6215
    # normalise the disk list
6216
    self.op.disks = sorted(frozenset(self.op.disks))
6217

    
6218
  def ExpandNames(self):
6219
    self._ExpandAndLockInstance()
6220
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6221
    if self.op.nodes:
6222
      self.op.nodes = [_ExpandNodeName(self.cfg, n) for n in self.op.nodes]
6223
      self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
6224
    else:
6225
      self.needed_locks[locking.LEVEL_NODE] = []
6226

    
6227
  def DeclareLocks(self, level):
6228
    if level == locking.LEVEL_NODE:
6229
      # if we replace the nodes, we only need to lock the old primary,
6230
      # otherwise we need to lock all nodes for disk re-creation
6231
      primary_only = bool(self.op.nodes)
6232
      self._LockInstancesNodes(primary_only=primary_only)
6233

    
6234
  def BuildHooksEnv(self):
6235
    """Build hooks env.
6236

6237
    This runs on master, primary and secondary nodes of the instance.
6238

6239
    """
6240
    return _BuildInstanceHookEnvByObject(self, self.instance)
6241

    
6242
  def BuildHooksNodes(self):
6243
    """Build hooks nodes.
6244

6245
    """
6246
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6247
    return (nl, nl)
6248

    
6249
  def CheckPrereq(self):
6250
    """Check prerequisites.
6251

6252
    This checks that the instance is in the cluster and is not running.
6253

6254
    """
6255
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6256
    assert instance is not None, \
6257
      "Cannot retrieve locked instance %s" % self.op.instance_name
6258
    if self.op.nodes:
6259
      if len(self.op.nodes) != len(instance.all_nodes):
6260
        raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
6261
                                   " %d replacement nodes were specified" %
6262
                                   (instance.name, len(instance.all_nodes),
6263
                                    len(self.op.nodes)),
6264
                                   errors.ECODE_INVAL)
6265
      assert instance.disk_template != constants.DT_DRBD8 or \
6266
          len(self.op.nodes) == 2
6267
      assert instance.disk_template != constants.DT_PLAIN or \
6268
          len(self.op.nodes) == 1
6269
      primary_node = self.op.nodes[0]
6270
    else:
6271
      primary_node = instance.primary_node
6272
    _CheckNodeOnline(self, primary_node)
6273

    
6274
    if instance.disk_template == constants.DT_DISKLESS:
6275
      raise errors.OpPrereqError("Instance '%s' has no disks" %
6276
                                 self.op.instance_name, errors.ECODE_INVAL)
6277
    # if we replace nodes *and* the old primary is offline, we don't
6278
    # check
6279
    assert instance.primary_node in self.needed_locks[locking.LEVEL_NODE]
6280
    old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
6281
    if not (self.op.nodes and old_pnode.offline):
6282
      _CheckInstanceDown(self, instance, "cannot recreate disks")
6283

    
6284
    if not self.op.disks:
6285
      self.op.disks = range(len(instance.disks))
6286
    else:
6287
      for idx in self.op.disks:
6288
        if idx >= len(instance.disks):
6289
          raise errors.OpPrereqError("Invalid disk index '%s'" % idx,
6290
                                     errors.ECODE_INVAL)
6291
    if self.op.disks != range(len(instance.disks)) and self.op.nodes:
6292
      raise errors.OpPrereqError("Can't recreate disks partially and"
6293
                                 " change the nodes at the same time",
6294
                                 errors.ECODE_INVAL)
6295
    self.instance = instance
6296

    
6297
  def Exec(self, feedback_fn):
6298
    """Recreate the disks.
6299

6300
    """
6301
    instance = self.instance
6302

    
6303
    to_skip = []
6304
    mods = [] # keeps track of needed logical_id changes
6305

    
6306
    for idx, disk in enumerate(instance.disks):
6307
      if idx not in self.op.disks: # disk idx has not been passed in
6308
        to_skip.append(idx)
6309
        continue
6310
      # update secondaries for disks, if needed
6311
      if self.op.nodes:
6312
        if disk.dev_type == constants.LD_DRBD8:
6313
          # need to update the nodes and minors
6314
          assert len(self.op.nodes) == 2
6315
          assert len(disk.logical_id) == 6 # otherwise disk internals
6316
                                           # have changed
6317
          (_, _, old_port, _, _, old_secret) = disk.logical_id
6318
          new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes, instance.name)
6319
          new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
6320
                    new_minors[0], new_minors[1], old_secret)
6321
          assert len(disk.logical_id) == len(new_id)
6322
          mods.append((idx, new_id))
6323

    
6324
    # now that we have passed all asserts above, we can apply the mods
6325
    # in a single run (to avoid partial changes)
6326
    for idx, new_id in mods:
6327
      instance.disks[idx].logical_id = new_id
6328

    
6329
    # change primary node, if needed
6330
    if self.op.nodes:
6331
      instance.primary_node = self.op.nodes[0]
6332
      self.LogWarning("Changing the instance's nodes, you will have to"
6333
                      " remove any disks left on the older nodes manually")
6334

    
6335
    if self.op.nodes:
6336
      self.cfg.Update(instance, feedback_fn)
6337

    
6338
    _CreateDisks(self, instance, to_skip=to_skip)
6339

    
6340

    
6341
class LUInstanceRename(LogicalUnit):
6342
  """Rename an instance.
6343

6344
  """
6345
  HPATH = "instance-rename"
6346
  HTYPE = constants.HTYPE_INSTANCE
6347

    
6348
  def CheckArguments(self):
6349
    """Check arguments.
6350

6351
    """
6352
    if self.op.ip_check and not self.op.name_check:
6353
      # TODO: make the ip check more flexible and not depend on the name check
6354
      raise errors.OpPrereqError("IP address check requires a name check",
6355
                                 errors.ECODE_INVAL)
6356

    
6357
  def BuildHooksEnv(self):
6358
    """Build hooks env.
6359

6360
    This runs on master, primary and secondary nodes of the instance.
6361

6362
    """
6363
    env = _BuildInstanceHookEnvByObject(self, self.instance)
6364
    env["INSTANCE_NEW_NAME"] = self.op.new_name
6365
    return env
6366

    
6367
  def BuildHooksNodes(self):
6368
    """Build hooks nodes.
6369

6370
    """
6371
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6372
    return (nl, nl)
6373

    
6374
  def CheckPrereq(self):
6375
    """Check prerequisites.
6376

6377
    This checks that the instance is in the cluster and is not running.
6378

6379
    """
6380
    self.op.instance_name = _ExpandInstanceName(self.cfg,
6381
                                                self.op.instance_name)
6382
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6383
    assert instance is not None
6384
    _CheckNodeOnline(self, instance.primary_node)
6385
    _CheckInstanceDown(self, instance, "cannot rename")
6386
    self.instance = instance
6387

    
6388
    new_name = self.op.new_name
6389
    if self.op.name_check:
6390
      hostname = netutils.GetHostname(name=new_name)
6391
      if hostname != new_name:
6392
        self.LogInfo("Resolved given name '%s' to '%s'", new_name,
6393
                     hostname.name)
6394
      if not utils.MatchNameComponent(self.op.new_name, [hostname.name]):
6395
        raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
6396
                                    " same as given hostname '%s'") %
6397
                                    (hostname.name, self.op.new_name),
6398
                                    errors.ECODE_INVAL)
6399
      new_name = self.op.new_name = hostname.name
6400
      if (self.op.ip_check and
6401
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
6402
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
6403
                                   (hostname.ip, new_name),
6404
                                   errors.ECODE_NOTUNIQUE)
6405

    
6406
    instance_list = self.cfg.GetInstanceList()
6407
    if new_name in instance_list and new_name != instance.name:
6408
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
6409
                                 new_name, errors.ECODE_EXISTS)
6410

    
6411
  def Exec(self, feedback_fn):
6412
    """Rename the instance.
6413

6414
    """
6415
    inst = self.instance
6416
    old_name = inst.name
6417

    
6418
    rename_file_storage = False
6419
    if (inst.disk_template in constants.DTS_FILEBASED and
6420
        self.op.new_name != inst.name):
6421
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
6422
      rename_file_storage = True
6423

    
6424
    self.cfg.RenameInstance(inst.name, self.op.new_name)
6425
    # Change the instance lock. This is definitely safe while we hold the BGL.
6426
    # Otherwise the new lock would have to be added in acquired mode.
6427
    assert self.REQ_BGL
6428
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
6429
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
6430

    
6431
    # re-read the instance from the configuration after rename
6432
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
6433

    
6434
    if rename_file_storage:
6435
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
6436
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
6437
                                                     old_file_storage_dir,
6438
                                                     new_file_storage_dir)
6439
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
6440
                   " (but the instance has been renamed in Ganeti)" %
6441
                   (inst.primary_node, old_file_storage_dir,
6442
                    new_file_storage_dir))
6443

    
6444
    _StartInstanceDisks(self, inst, None)
6445
    try:
6446
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
6447
                                                 old_name, self.op.debug_level)
6448
      msg = result.fail_msg
6449
      if msg:
6450
        msg = ("Could not run OS rename script for instance %s on node %s"
6451
               " (but the instance has been renamed in Ganeti): %s" %
6452
               (inst.name, inst.primary_node, msg))
6453
        self.proc.LogWarning(msg)
6454
    finally:
6455
      _ShutdownInstanceDisks(self, inst)
6456

    
6457
    return inst.name
6458

    
6459

    
6460
class LUInstanceRemove(LogicalUnit):
6461
  """Remove an instance.
6462

6463
  """
6464
  HPATH = "instance-remove"
6465
  HTYPE = constants.HTYPE_INSTANCE
6466
  REQ_BGL = False
6467

    
6468
  def ExpandNames(self):
6469
    self._ExpandAndLockInstance()
6470
    self.needed_locks[locking.LEVEL_NODE] = []
6471
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6472

    
6473
  def DeclareLocks(self, level):
6474
    if level == locking.LEVEL_NODE:
6475
      self._LockInstancesNodes()
6476

    
6477
  def BuildHooksEnv(self):
6478
    """Build hooks env.
6479

6480
    This runs on master, primary and secondary nodes of the instance.
6481

6482
    """
6483
    env = _BuildInstanceHookEnvByObject(self, self.instance)
6484
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
6485
    return env
6486

    
6487
  def BuildHooksNodes(self):
6488
    """Build hooks nodes.
6489

6490
    """
6491
    nl = [self.cfg.GetMasterNode()]
6492
    nl_post = list(self.instance.all_nodes) + nl
6493
    return (nl, nl_post)
6494

    
6495
  def CheckPrereq(self):
6496
    """Check prerequisites.
6497

6498
    This checks that the instance is in the cluster.
6499

6500
    """
6501
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6502
    assert self.instance is not None, \
6503
      "Cannot retrieve locked instance %s" % self.op.instance_name
6504

    
6505
  def Exec(self, feedback_fn):
6506
    """Remove the instance.
6507

6508
    """
6509
    instance = self.instance
6510
    logging.info("Shutting down instance %s on node %s",
6511
                 instance.name, instance.primary_node)
6512

    
6513
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
6514
                                             self.op.shutdown_timeout)
6515
    msg = result.fail_msg
6516
    if msg:
6517
      if self.op.ignore_failures:
6518
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
6519
      else:
6520
        raise errors.OpExecError("Could not shutdown instance %s on"
6521
                                 " node %s: %s" %
6522
                                 (instance.name, instance.primary_node, msg))
6523

    
6524
    _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
6525

    
6526

    
6527
def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
6528
  """Utility function to remove an instance.
6529

6530
  """
6531
  logging.info("Removing block devices for instance %s", instance.name)
6532

    
6533
  if not _RemoveDisks(lu, instance):
6534
    if not ignore_failures:
6535
      raise errors.OpExecError("Can't remove instance's disks")
6536
    feedback_fn("Warning: can't remove instance's disks")
6537

    
6538
  logging.info("Removing instance %s out of cluster config", instance.name)
6539

    
6540
  lu.cfg.RemoveInstance(instance.name)
6541

    
6542
  assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
6543
    "Instance lock removal conflict"
6544

    
6545
  # Remove lock for the instance
6546
  lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
6547

    
6548

    
6549
class LUInstanceQuery(NoHooksLU):
6550
  """Logical unit for querying instances.
6551

6552
  """
6553
  # pylint: disable=W0142
6554
  REQ_BGL = False
6555

    
6556
  def CheckArguments(self):
6557
    self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
6558
                             self.op.output_fields, self.op.use_locking)
6559

    
6560
  def ExpandNames(self):
6561
    self.iq.ExpandNames(self)
6562

    
6563
  def DeclareLocks(self, level):
6564
    self.iq.DeclareLocks(self, level)
6565

    
6566
  def Exec(self, feedback_fn):
6567
    return self.iq.OldStyleQuery(self)
6568

    
6569

    
6570
class LUInstanceFailover(LogicalUnit):
6571
  """Failover an instance.
6572

6573
  """
6574
  HPATH = "instance-failover"
6575
  HTYPE = constants.HTYPE_INSTANCE
6576
  REQ_BGL = False
6577

    
6578
  def CheckArguments(self):
6579
    """Check the arguments.
6580

6581
    """
6582
    self.iallocator = getattr(self.op, "iallocator", None)
6583
    self.target_node = getattr(self.op, "target_node", None)
6584

    
6585
  def ExpandNames(self):
6586
    self._ExpandAndLockInstance()
6587

    
6588
    if self.op.target_node is not None:
6589
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
6590

    
6591
    self.needed_locks[locking.LEVEL_NODE] = []
6592
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6593

    
6594
    ignore_consistency = self.op.ignore_consistency
6595
    shutdown_timeout = self.op.shutdown_timeout
6596
    self._migrater = TLMigrateInstance(self, self.op.instance_name,
6597
                                       cleanup=False,
6598
                                       failover=True,
6599
                                       ignore_consistency=ignore_consistency,
6600
                                       shutdown_timeout=shutdown_timeout)
6601
    self.tasklets = [self._migrater]
6602

    
6603
  def DeclareLocks(self, level):
6604
    if level == locking.LEVEL_NODE:
6605
      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
6606
      if instance.disk_template in constants.DTS_EXT_MIRROR:
6607
        if self.op.target_node is None:
6608
          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6609
        else:
6610
          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
6611
                                                   self.op.target_node]
6612
        del self.recalculate_locks[locking.LEVEL_NODE]
6613
      else:
6614
        self._LockInstancesNodes()
6615

    
6616
  def BuildHooksEnv(self):
6617
    """Build hooks env.
6618

6619
    This runs on master, primary and secondary nodes of the instance.
6620

6621
    """
6622
    instance = self._migrater.instance
6623
    source_node = instance.primary_node
6624
    target_node = self.op.target_node
6625
    env = {
6626
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
6627
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
6628
      "OLD_PRIMARY": source_node,
6629
      "NEW_PRIMARY": target_node,
6630
      }
6631

    
6632
    if instance.disk_template in constants.DTS_INT_MIRROR:
6633
      env["OLD_SECONDARY"] = instance.secondary_nodes[0]
6634
      env["NEW_SECONDARY"] = source_node
6635
    else:
6636
      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""
6637

    
6638
    env.update(_BuildInstanceHookEnvByObject(self, instance))
6639

    
6640
    return env
6641

    
6642
  def BuildHooksNodes(self):
6643
    """Build hooks nodes.
6644

6645
    """
6646
    instance = self._migrater.instance
6647
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
6648
    return (nl, nl + [instance.primary_node])
6649

    
6650

    
6651
class LUInstanceMigrate(LogicalUnit):
6652
  """Migrate an instance.
6653

6654
  This is migration without shutting down, compared to the failover,
6655
  which is done with shutdown.
6656

6657
  """
6658
  HPATH = "instance-migrate"
6659
  HTYPE = constants.HTYPE_INSTANCE
6660
  REQ_BGL = False
6661

    
6662
  def ExpandNames(self):
6663
    self._ExpandAndLockInstance()
6664

    
6665
    if self.op.target_node is not None:
6666
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
6667

    
6668
    self.needed_locks[locking.LEVEL_NODE] = []
6669
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6670

    
6671
    self._migrater = TLMigrateInstance(self, self.op.instance_name,
6672
                                       cleanup=self.op.cleanup,
6673
                                       failover=False,
6674
                                       fallback=self.op.allow_failover)
6675
    self.tasklets = [self._migrater]
6676

    
6677
  def DeclareLocks(self, level):
6678
    if level == locking.LEVEL_NODE:
6679
      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
6680
      if instance.disk_template in constants.DTS_EXT_MIRROR:
6681
        if self.op.target_node is None:
6682
          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6683
        else:
6684
          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
6685
                                                   self.op.target_node]
6686
        del self.recalculate_locks[locking.LEVEL_NODE]
6687
      else:
6688
        self._LockInstancesNodes()
6689

    
6690
  def BuildHooksEnv(self):
6691
    """Build hooks env.
6692

6693
    This runs on master, primary and secondary nodes of the instance.
6694

6695
    """
6696
    instance = self._migrater.instance
6697
    source_node = instance.primary_node
6698
    target_node = self.op.target_node
6699
    env = _BuildInstanceHookEnvByObject(self, instance)
6700
    env.update({
6701
      "MIGRATE_LIVE": self._migrater.live,
6702
      "MIGRATE_CLEANUP": self.op.cleanup,
6703
      "OLD_PRIMARY": source_node,
6704
      "NEW_PRIMARY": target_node,
6705
      })
6706

    
6707
    if instance.disk_template in constants.DTS_INT_MIRROR:
6708
      env["OLD_SECONDARY"] = target_node
6709
      env["NEW_SECONDARY"] = source_node
6710
    else:
6711
      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None
6712

    
6713
    return env
6714

    
6715
  def BuildHooksNodes(self):
6716
    """Build hooks nodes.
6717

6718
    """
6719
    instance = self._migrater.instance
6720
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
6721
    return (nl, nl + [instance.primary_node])
6722

    
6723

    
6724
class LUInstanceMove(LogicalUnit):
6725
  """Move an instance by data-copying.
6726

6727
  """
6728
  HPATH = "instance-move"
6729
  HTYPE = constants.HTYPE_INSTANCE
6730
  REQ_BGL = False
6731

    
6732
  def ExpandNames(self):
6733
    self._ExpandAndLockInstance()
6734
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
6735
    self.op.target_node = target_node
6736
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
6737
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6738

    
6739
  def DeclareLocks(self, level):
6740
    if level == locking.LEVEL_NODE:
6741
      self._LockInstancesNodes(primary_only=True)
6742

    
6743
  def BuildHooksEnv(self):
6744
    """Build hooks env.
6745

6746
    This runs on master, primary and secondary nodes of the instance.
6747

6748
    """
6749
    env = {
6750
      "TARGET_NODE": self.op.target_node,
6751
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
6752
      }
6753
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6754
    return env
6755

    
6756
  def BuildHooksNodes(self):
6757
    """Build hooks nodes.
6758

6759
    """
6760
    nl = [
6761
      self.cfg.GetMasterNode(),
6762
      self.instance.primary_node,
6763
      self.op.target_node,
6764
      ]
6765
    return (nl, nl)
6766

    
6767
  def CheckPrereq(self):
6768
    """Check prerequisites.
6769

6770
    This checks that the instance is in the cluster.
6771

6772
    """
6773
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6774
    assert self.instance is not None, \
6775
      "Cannot retrieve locked instance %s" % self.op.instance_name
6776

    
6777
    node = self.cfg.GetNodeInfo(self.op.target_node)
6778
    assert node is not None, \
6779
      "Cannot retrieve locked node %s" % self.op.target_node
6780

    
6781
    self.target_node = target_node = node.name
6782

    
6783
    if target_node == instance.primary_node:
6784
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
6785
                                 (instance.name, target_node),
6786
                                 errors.ECODE_STATE)
6787

    
6788
    bep = self.cfg.GetClusterInfo().FillBE(instance)
6789

    
6790
    for idx, dsk in enumerate(instance.disks):
6791
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
6792
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
6793
                                   " cannot copy" % idx, errors.ECODE_STATE)
6794

    
6795
    _CheckNodeOnline(self, target_node)
6796
    _CheckNodeNotDrained(self, target_node)
6797
    _CheckNodeVmCapable(self, target_node)
6798

    
6799
    if instance.admin_up:
6800
      # check memory requirements on the secondary node
6801
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
6802
                           instance.name, bep[constants.BE_MEMORY],
6803
                           instance.hypervisor)
6804
    else:
6805
      self.LogInfo("Not checking memory on the secondary node as"
6806
                   " instance will not be started")
6807

    
6808
    # check bridge existance
6809
    _CheckInstanceBridgesExist(self, instance, node=target_node)
6810

    
6811
  def Exec(self, feedback_fn):
6812
    """Move an instance.
6813

6814
    The move is done by shutting it down on its present node, copying
6815
    the data over (slow) and starting it on the new node.
6816

6817
    """
6818
    instance = self.instance
6819

    
6820
    source_node = instance.primary_node
6821
    target_node = self.target_node
6822

    
6823
    self.LogInfo("Shutting down instance %s on source node %s",
6824
                 instance.name, source_node)
6825

    
6826
    result = self.rpc.call_instance_shutdown(source_node, instance,
6827
                                             self.op.shutdown_timeout)
6828
    msg = result.fail_msg
6829
    if msg:
6830
      if self.op.ignore_consistency:
6831
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
6832
                             " Proceeding anyway. Please make sure node"
6833
                             " %s is down. Error details: %s",
6834
                             instance.name, source_node, source_node, msg)
6835
      else:
6836
        raise errors.OpExecError("Could not shutdown instance %s on"
6837
                                 " node %s: %s" %
6838
                                 (instance.name, source_node, msg))
6839

    
6840
    # create the target disks
6841
    try:
6842
      _CreateDisks(self, instance, target_node=target_node)
6843
    except errors.OpExecError:
6844
      self.LogWarning("Device creation failed, reverting...")
6845
      try:
6846
        _RemoveDisks(self, instance, target_node=target_node)
6847
      finally:
6848
        self.cfg.ReleaseDRBDMinors(instance.name)
6849
        raise
6850

    
6851
    cluster_name = self.cfg.GetClusterInfo().cluster_name
6852

    
6853
    errs = []
6854
    # activate, get path, copy the data over
6855
    for idx, disk in enumerate(instance.disks):
6856
      self.LogInfo("Copying data for disk %d", idx)
6857
      result = self.rpc.call_blockdev_assemble(target_node, disk,
6858
                                               instance.name, True, idx)
6859
      if result.fail_msg:
6860
        self.LogWarning("Can't assemble newly created disk %d: %s",
6861
                        idx, result.fail_msg)
6862
        errs.append(result.fail_msg)
6863
        break
6864
      dev_path = result.payload
6865
      result = self.rpc.call_blockdev_export(source_node, disk,
6866
                                             target_node, dev_path,
6867
                                             cluster_name)
6868
      if result.fail_msg:
6869
        self.LogWarning("Can't copy data over for disk %d: %s",
6870
                        idx, result.fail_msg)
6871
        errs.append(result.fail_msg)
6872
        break
6873

    
6874
    if errs:
6875
      self.LogWarning("Some disks failed to copy, aborting")
6876
      try:
6877
        _RemoveDisks(self, instance, target_node=target_node)
6878
      finally:
6879
        self.cfg.ReleaseDRBDMinors(instance.name)
6880
        raise errors.OpExecError("Errors during disk copy: %s" %
6881
                                 (",".join(errs),))
6882

    
6883
    instance.primary_node = target_node
6884
    self.cfg.Update(instance, feedback_fn)
6885

    
6886
    self.LogInfo("Removing the disks on the original node")
6887
    _RemoveDisks(self, instance, target_node=source_node)
6888

    
6889
    # Only start the instance if it's marked as up
6890
    if instance.admin_up:
6891
      self.LogInfo("Starting instance %s on node %s",
6892
                   instance.name, target_node)
6893

    
6894
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
6895
                                           ignore_secondaries=True)
6896
      if not disks_ok:
6897
        _ShutdownInstanceDisks(self, instance)
6898
        raise errors.OpExecError("Can't activate the instance's disks")
6899

    
6900
      result = self.rpc.call_instance_start(target_node, instance,
6901
                                            None, None, False)
6902
      msg = result.fail_msg
6903
      if msg:
6904
        _ShutdownInstanceDisks(self, instance)
6905
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
6906
                                 (instance.name, target_node, msg))
6907

    
6908

    
6909
class LUNodeMigrate(LogicalUnit):
6910
  """Migrate all instances from a node.
6911

6912
  """
6913
  HPATH = "node-migrate"
6914
  HTYPE = constants.HTYPE_NODE
6915
  REQ_BGL = False
6916

    
6917
  def CheckArguments(self):
6918
    pass
6919

    
6920
  def ExpandNames(self):
6921
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6922

    
6923
    self.share_locks = _ShareAll()
6924
    self.needed_locks = {
6925
      locking.LEVEL_NODE: [self.op.node_name],
6926
      }
6927

    
6928
  def BuildHooksEnv(self):
6929
    """Build hooks env.
6930

6931
    This runs on the master, the primary and all the secondaries.
6932

6933
    """
6934
    return {
6935
      "NODE_NAME": self.op.node_name,
6936
      }
6937

    
6938
  def BuildHooksNodes(self):
6939
    """Build hooks nodes.
6940

6941
    """
6942
    nl = [self.cfg.GetMasterNode()]
6943
    return (nl, nl)
6944

    
6945
  def CheckPrereq(self):
6946
    pass
6947

    
6948
  def Exec(self, feedback_fn):
6949
    # Prepare jobs for migration instances
6950
    jobs = [
6951
      [opcodes.OpInstanceMigrate(instance_name=inst.name,
6952
                                 mode=self.op.mode,
6953
                                 live=self.op.live,
6954
                                 iallocator=self.op.iallocator,
6955
                                 target_node=self.op.target_node)]
6956
      for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)
6957
      ]
6958

    
6959
    # TODO: Run iallocator in this opcode and pass correct placement options to
6960
    # OpInstanceMigrate. Since other jobs can modify the cluster between
6961
    # running the iallocator and the actual migration, a good consistency model
6962
    # will have to be found.
6963

    
6964
    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
6965
            frozenset([self.op.node_name]))
6966

    
6967
    return ResultWithJobs(jobs)
6968

    
6969

    
6970
class TLMigrateInstance(Tasklet):
6971
  """Tasklet class for instance migration.
6972

6973
  @type live: boolean
6974
  @ivar live: whether the migration will be done live or non-live;
6975
      this variable is initalized only after CheckPrereq has run
6976
  @type cleanup: boolean
6977
  @ivar cleanup: Wheater we cleanup from a failed migration
6978
  @type iallocator: string
6979
  @ivar iallocator: The iallocator used to determine target_node
6980
  @type target_node: string
6981
  @ivar target_node: If given, the target_node to reallocate the instance to
6982
  @type failover: boolean
6983
  @ivar failover: Whether operation results in failover or migration
6984
  @type fallback: boolean
6985
  @ivar fallback: Whether fallback to failover is allowed if migration not
6986
                  possible
6987
  @type ignore_consistency: boolean
6988
  @ivar ignore_consistency: Wheter we should ignore consistency between source
6989
                            and target node
6990
  @type shutdown_timeout: int
6991
  @ivar shutdown_timeout: In case of failover timeout of the shutdown
6992

6993
  """
6994
  def __init__(self, lu, instance_name, cleanup=False,
6995
               failover=False, fallback=False,
6996
               ignore_consistency=False,
6997
               shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT):
6998
    """Initializes this class.
6999

7000
    """
7001
    Tasklet.__init__(self, lu)
7002

    
7003
    # Parameters
7004
    self.instance_name = instance_name
7005
    self.cleanup = cleanup
7006
    self.live = False # will be overridden later
7007
    self.failover = failover
7008
    self.fallback = fallback
7009
    self.ignore_consistency = ignore_consistency
7010
    self.shutdown_timeout = shutdown_timeout
7011

    
7012
  def CheckPrereq(self):
7013
    """Check prerequisites.
7014

7015
    This checks that the instance is in the cluster.
7016

7017
    """
7018
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
7019
    instance = self.cfg.GetInstanceInfo(instance_name)
7020
    assert instance is not None
7021
    self.instance = instance
7022

    
7023
    if (not self.cleanup and not instance.admin_up and not self.failover and
7024
        self.fallback):
7025
      self.lu.LogInfo("Instance is marked down, fallback allowed, switching"
7026
                      " to failover")
7027
      self.failover = True
7028

    
7029
    if instance.disk_template not in constants.DTS_MIRRORED:
7030
      if self.failover:
7031
        text = "failovers"
7032
      else:
7033
        text = "migrations"
7034
      raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
7035
                                 " %s" % (instance.disk_template, text),
7036
                                 errors.ECODE_STATE)
7037

    
7038
    if instance.disk_template in constants.DTS_EXT_MIRROR:
7039
      _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
7040

    
7041
      if self.lu.op.iallocator:
7042
        self._RunAllocator()
7043
      else:
7044
        # We set self.target_node as it is required by
7045
        # BuildHooksEnv
7046
        self.target_node = self.lu.op.target_node
7047

    
7048
      # self.target_node is already populated, either directly or by the
7049
      # iallocator run
7050
      target_node = self.target_node
7051
      if self.target_node == instance.primary_node:
7052
        raise errors.OpPrereqError("Cannot migrate instance %s"
7053
                                   " to its primary (%s)" %
7054
                                   (instance.name, instance.primary_node))
7055

    
7056
      if len(self.lu.tasklets) == 1:
7057
        # It is safe to release locks only when we're the only tasklet
7058
        # in the LU
7059
        _ReleaseLocks(self.lu, locking.LEVEL_NODE,
7060
                      keep=[instance.primary_node, self.target_node])
7061

    
7062
    else:
7063
      secondary_nodes = instance.secondary_nodes
7064
      if not secondary_nodes:
7065
        raise errors.ConfigurationError("No secondary node but using"
7066
                                        " %s disk template" %
7067
                                        instance.disk_template)
7068
      target_node = secondary_nodes[0]
7069
      if self.lu.op.iallocator or (self.lu.op.target_node and
7070
                                   self.lu.op.target_node != target_node):
7071
        if self.failover:
7072
          text = "failed over"
7073
        else:
7074
          text = "migrated"
7075
        raise errors.OpPrereqError("Instances with disk template %s cannot"
7076
                                   " be %s to arbitrary nodes"
7077
                                   " (neither an iallocator nor a target"
7078
                                   " node can be passed)" %
7079
                                   (instance.disk_template, text),
7080
                                   errors.ECODE_INVAL)
7081

    
7082
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
7083

    
7084
    # check memory requirements on the secondary node
7085
    if not self.failover or instance.admin_up:
7086
      _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
7087
                           instance.name, i_be[constants.BE_MEMORY],
7088
                           instance.hypervisor)
7089
    else:
7090
      self.lu.LogInfo("Not checking memory on the secondary node as"
7091
                      " instance will not be started")
7092

    
7093
    # check bridge existence
7094
    _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
7095

    
7096
    if not self.cleanup:
7097
      _CheckNodeNotDrained(self.lu, target_node)
7098
      if not self.failover:
7099
        result = self.rpc.call_instance_migratable(instance.primary_node,
7100
                                                   instance)
7101
        if result.fail_msg and self.fallback:
7102
          self.lu.LogInfo("Can't migrate, instance offline, fallback to"
7103
                          " failover")
7104
          self.failover = True
7105
        else:
7106
          result.Raise("Can't migrate, please use failover",
7107
                       prereq=True, ecode=errors.ECODE_STATE)
7108

    
7109
    assert not (self.failover and self.cleanup)
7110

    
7111
    if not self.failover:
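      # Only one of the 'live' and 'mode' parameters may be given; the
      # boolean 'live' value is translated into the equivalent migration
      # mode here so that only self.live needs to be consulted later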
7112
      if self.lu.op.live is not None and self.lu.op.mode is not None:
7113
        raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
7114
                                   " parameters are accepted",
7115
                                   errors.ECODE_INVAL)
7116
      if self.lu.op.live is not None:
7117
        if self.lu.op.live:
7118
          self.lu.op.mode = constants.HT_MIGRATION_LIVE
7119
        else:
7120
          self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
7121
        # reset the 'live' parameter to None so that repeated
7122
        # invocations of CheckPrereq do not raise an exception
7123
        self.lu.op.live = None
7124
      elif self.lu.op.mode is None:
7125
        # read the default value from the hypervisor
7126
        i_hv = self.cfg.GetClusterInfo().FillHV(self.instance,
7127
                                                skip_globals=False)
7128
        self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
7129

    
7130
      self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
7131
    else:
7132
      # Failover is never live
7133
      self.live = False
7134

    
7135
  def _RunAllocator(self):
7136
    """Run the allocator based on input opcode.
7137

7138
    """
7139
    ial = IAllocator(self.cfg, self.rpc,
7140
                     mode=constants.IALLOCATOR_MODE_RELOC,
7141
                     name=self.instance_name,
7142
                     # TODO See why hail breaks with a single node below
7143
                     relocate_from=[self.instance.primary_node,
7144
                                    self.instance.primary_node],
7145
                     )
7146

    
7147
    ial.Run(self.lu.op.iallocator)
7148

    
7149
    if not ial.success:
7150
      raise errors.OpPrereqError("Can't compute nodes using"
7151
                                 " iallocator '%s': %s" %
7152
                                 (self.lu.op.iallocator, ial.info),
7153
                                 errors.ECODE_NORES)
7154
    if len(ial.result) != ial.required_nodes:
7155
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
7156
                                 " of nodes (%s), required %s" %
7157
                                 (self.lu.op.iallocator, len(ial.result),
7158
                                  ial.required_nodes), errors.ECODE_FAULT)
7159
    self.target_node = ial.result[0]
7160
    self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
7161
                 self.instance_name, self.lu.op.iallocator,
7162
                 utils.CommaJoin(ial.result))
7163

    
7164
  def _WaitUntilSync(self):
7165
    """Poll with custom rpc for disk sync.
7166

7167
    This uses our own step-based rpc call.
7168

7169
    """
7170
    self.feedback_fn("* wait until resync is done")
7171
    all_done = False
7172
    while not all_done:
7173
      all_done = True
7174
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
7175
                                            self.nodes_ip,
7176
                                            self.instance.disks)
7177
      min_percent = 100
7178
      for node, nres in result.items():
7179
        nres.Raise("Cannot resync disks on node %s" % node)
7180
        node_done, node_percent = nres.payload
7181
        all_done = all_done and node_done
7182
        if node_percent is not None:
7183
          min_percent = min(min_percent, node_percent)
7184
      if not all_done:
7185
        if min_percent < 100:
7186
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
7187
        time.sleep(2)
7188

    
7189
  def _EnsureSecondary(self, node):
7190
    """Demote a node to secondary.
7191

7192
    """
7193
    self.feedback_fn("* switching node %s to secondary mode" % node)
7194

    
7195
    for dev in self.instance.disks:
7196
      self.cfg.SetDiskID(dev, node)
7197

    
7198
    result = self.rpc.call_blockdev_close(node, self.instance.name,
7199
                                          self.instance.disks)
7200
    result.Raise("Cannot change disk to secondary on node %s" % node)
7201

    
7202
  def _GoStandalone(self):
7203
    """Disconnect from the network.
7204

7205
    """
7206
    self.feedback_fn("* changing into standalone mode")
7207
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
7208
                                               self.instance.disks)
7209
    for node, nres in result.items():
7210
      nres.Raise("Cannot disconnect disks node %s" % node)
7211

    
7212
  def _GoReconnect(self, multimaster):
7213
    """Reconnect to the network.
7214

7215
    """
7216
    if multimaster:
7217
      msg = "dual-master"
7218
    else:
7219
      msg = "single-master"
7220
    self.feedback_fn("* changing disks into %s mode" % msg)
7221
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
7222
                                           self.instance.disks,
7223
                                           self.instance.name, multimaster)
7224
    for node, nres in result.items():
7225
      nres.Raise("Cannot change disks config on node %s" % node)
7226

    
7227
  def _ExecCleanup(self):
7228
    """Try to cleanup after a failed migration.
7229

7230
    The cleanup is done by:
7231
      - check that the instance is running only on one node
7232
        (and update the config if needed)
7233
      - change disks on its secondary node to secondary
7234
      - wait until disks are fully synchronized
7235
      - disconnect from the network
7236
      - change disks into single-master mode
7237
      - wait again until disks are fully synchronized
7238

7239
    """
7240
    instance = self.instance
7241
    target_node = self.target_node
7242
    source_node = self.source_node
7243

    
7244
    # check running on only one node
7245
    self.feedback_fn("* checking where the instance actually runs"
7246
                     " (if this hangs, the hypervisor might be in"
7247
                     " a bad state)")
7248
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
7249
    for node, result in ins_l.items():
7250
      result.Raise("Can't contact node %s" % node)
7251

    
7252
    runningon_source = instance.name in ins_l[source_node].payload
7253
    runningon_target = instance.name in ins_l[target_node].payload
7254

    
7255
    if runningon_source and runningon_target:
7256
      raise errors.OpExecError("Instance seems to be running on two nodes,"
7257
                               " or the hypervisor is confused; you will have"
7258
                               " to ensure manually that it runs only on one"
7259
                               " and restart this operation")
7260

    
7261
    if not (runningon_source or runningon_target):
7262
      raise errors.OpExecError("Instance does not seem to be running at all;"
7263
                               " in this case it's safer to repair by"
7264
                               " running 'gnt-instance stop' to ensure disk"
7265
                               " shutdown, and then restarting it")
7266

    
7267
    if runningon_target:
7268
      # the migration has actually succeeded, we need to update the config
7269
      self.feedback_fn("* instance running on secondary node (%s),"
7270
                       " updating config" % target_node)
7271
      instance.primary_node = target_node
7272
      self.cfg.Update(instance, self.feedback_fn)
7273
      demoted_node = source_node
7274
    else:
7275
      self.feedback_fn("* instance confirmed to be running on its"
7276
                       " primary node (%s)" % source_node)
7277
      demoted_node = target_node
7278

    
7279
    if instance.disk_template in constants.DTS_INT_MIRROR:
7280
      self._EnsureSecondary(demoted_node)
7281
      try:
7282
        self._WaitUntilSync()
7283
      except errors.OpExecError:
7284
        # we ignore here errors, since if the device is standalone, it
7285
        # won't be able to sync
7286
        pass
7287
      self._GoStandalone()
7288
      self._GoReconnect(False)
7289
      self._WaitUntilSync()
7290

    
7291
    self.feedback_fn("* done")
7292

    
7293
  def _RevertDiskStatus(self):
7294
    """Try to revert the disk status after a failed migration.
7295

7296
    """
7297
    target_node = self.target_node
7298
    if self.instance.disk_template in constants.DTS_EXT_MIRROR:
7299
      return
7300

    
7301
    try:
7302
      self._EnsureSecondary(target_node)
7303
      self._GoStandalone()
7304
      self._GoReconnect(False)
7305
      self._WaitUntilSync()
7306
    except errors.OpExecError, err:
7307
      self.lu.LogWarning("Migration failed and I can't reconnect the drives,"
7308
                         " please try to recover the instance manually;"
7309
                         " error '%s'" % str(err))
7310

    
7311
  def _AbortMigration(self):
7312
    """Call the hypervisor code to abort a started migration.
7313

7314
    """
7315
    instance = self.instance
7316
    target_node = self.target_node
7317
    migration_info = self.migration_info
7318

    
7319
    abort_result = self.rpc.call_finalize_migration(target_node,
7320
                                                    instance,
7321
                                                    migration_info,
7322
                                                    False)
7323
    abort_msg = abort_result.fail_msg
7324
    if abort_msg:
7325
      logging.error("Aborting migration failed on target node %s: %s",
7326
                    target_node, abort_msg)
7327
      # Don't raise an exception here, as we still have to try to revert the
7328
      # disk status, even if this step failed.
7329

    
7330
  def _ExecMigration(self):
7331
    """Migrate an instance.
7332

7333
    The migrate is done by:
7334
      - change the disks into dual-master mode
7335
      - wait until disks are fully synchronized again
7336
      - migrate the instance
7337
      - change disks on the new secondary node (the old primary) to secondary
7338
      - wait until disks are fully synchronized
7339
      - change disks into single-master mode
7340

7341
    """
7342
    instance = self.instance
7343
    target_node = self.target_node
7344
    source_node = self.source_node
7345

    
7346
    # Check for hypervisor version mismatch and warn the user.
7347
    nodeinfo = self.rpc.call_node_info([source_node, target_node],
7348
                                       None, self.instance.hypervisor)
7349
    src_info = nodeinfo[source_node]
7350
    dst_info = nodeinfo[target_node]
7351

    
7352
    if ((constants.HV_NODEINFO_KEY_VERSION in src_info.payload) and
7353
        (constants.HV_NODEINFO_KEY_VERSION in dst_info.payload)):
7354
      src_version = src_info.payload[constants.HV_NODEINFO_KEY_VERSION]
7355
      dst_version = dst_info.payload[constants.HV_NODEINFO_KEY_VERSION]
7356
      if src_version != dst_version:
7357
        self.feedback_fn("* warning: hypervisor version mismatch between"
7358
                         " source (%s) and target (%s) node" %
7359
                         (src_version, dst_version))
7360

    
7361
    self.feedback_fn("* checking disk consistency between source and target")
7362
    for dev in instance.disks:
7363
      if not _CheckDiskConsistency(self.lu, dev, target_node, False):
7364
        raise errors.OpExecError("Disk %s is degraded or not fully"
7365
                                 " synchronized on target node,"
7366
                                 " aborting migration" % dev.iv_name)
7367

    
7368
    # First get the migration information from the remote node
7369
    result = self.rpc.call_migration_info(source_node, instance)
7370
    msg = result.fail_msg
7371
    if msg:
7372
      log_err = ("Failed fetching source migration information from %s: %s" %
7373
                 (source_node, msg))
7374
      logging.error(log_err)
7375
      raise errors.OpExecError(log_err)
7376

    
7377
    self.migration_info = migration_info = result.payload
7378

    
7379
    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
7380
      # Then switch the disks to master/master mode
7381
      self._EnsureSecondary(target_node)
7382
      self._GoStandalone()
7383
      self._GoReconnect(True)
7384
      self._WaitUntilSync()
7385

    
7386
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
7387
    result = self.rpc.call_accept_instance(target_node,
7388
                                           instance,
7389
                                           migration_info,
7390
                                           self.nodes_ip[target_node])
7391

    
7392
    msg = result.fail_msg
7393
    if msg:
7394
      logging.error("Instance pre-migration failed, trying to revert"
7395
                    " disk status: %s", msg)
7396
      self.feedback_fn("Pre-migration failed, aborting")
7397
      self._AbortMigration()
7398
      self._RevertDiskStatus()
7399
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
7400
                               (instance.name, msg))
7401

    
7402
    self.feedback_fn("* migrating instance to %s" % target_node)
7403
    result = self.rpc.call_instance_migrate(source_node, instance,
7404
                                            self.nodes_ip[target_node],
7405
                                            self.live)
7406
    msg = result.fail_msg
7407
    if msg:
7408
      logging.error("Instance migration failed, trying to revert"
7409
                    " disk status: %s", msg)
7410
      self.feedback_fn("Migration failed, aborting")
7411
      self._AbortMigration()
7412
      self._RevertDiskStatus()
7413
      raise errors.OpExecError("Could not migrate instance %s: %s" %
7414
                               (instance.name, msg))
7415

    
7416
    instance.primary_node = target_node
7417
    # distribute new instance config to the other nodes
7418
    self.cfg.Update(instance, self.feedback_fn)
7419

    
7420
    result = self.rpc.call_finalize_migration(target_node,
7421
                                              instance,
7422
                                              migration_info,
7423
                                              True)
7424
    msg = result.fail_msg
7425
    if msg:
7426
      logging.error("Instance migration succeeded, but finalization failed:"
7427
                    " %s", msg)
7428
      raise errors.OpExecError("Could not finalize instance migration: %s" %
7429
                               msg)
7430

    
7431
    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
7432
      self._EnsureSecondary(source_node)
7433
      self._WaitUntilSync()
7434
      self._GoStandalone()
7435
      self._GoReconnect(False)
7436
      self._WaitUntilSync()
7437

    
7438
    self.feedback_fn("* done")
7439

    
7440
  def _ExecFailover(self):
7441
    """Failover an instance.
7442

7443
    The failover is done by shutting it down on its present node and
7444
    starting it on the secondary.
7445

7446
    """
7447
    instance = self.instance
7448
    primary_node = self.cfg.GetNodeInfo(instance.primary_node)
7449

    
7450
    source_node = instance.primary_node
7451
    target_node = self.target_node
7452

    
7453
    if instance.admin_up:
7454
      self.feedback_fn("* checking disk consistency between source and target")
7455
      for dev in instance.disks:
7456
        # for drbd, these are drbd over lvm
7457
        if not _CheckDiskConsistency(self.lu, dev, target_node, False):
7458
          if primary_node.offline:
7459
            self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
7460
                             " target node %s" %
7461
                             (primary_node.name, dev.iv_name, target_node))
7462
          elif not self.ignore_consistency:
7463
            raise errors.OpExecError("Disk %s is degraded on target node,"
7464
                                     " aborting failover" % dev.iv_name)
7465
    else:
7466
      self.feedback_fn("* not checking disk consistency as instance is not"
7467
                       " running")
7468

    
7469
    self.feedback_fn("* shutting down instance on source node")
7470
    logging.info("Shutting down instance %s on node %s",
7471
                 instance.name, source_node)
7472

    
7473
    result = self.rpc.call_instance_shutdown(source_node, instance,
7474
                                             self.shutdown_timeout)
7475
    msg = result.fail_msg
7476
    if msg:
7477
      if self.ignore_consistency or primary_node.offline:
7478
        self.lu.LogWarning("Could not shutdown instance %s on node %s,"
7479
                           " proceeding anyway; please make sure node"
7480
                           " %s is down; error details: %s",
7481
                           instance.name, source_node, source_node, msg)
7482
      else:
7483
        raise errors.OpExecError("Could not shutdown instance %s on"
7484
                                 " node %s: %s" %
7485
                                 (instance.name, source_node, msg))
7486

    
7487
    self.feedback_fn("* deactivating the instance's disks on source node")
7488
    if not _ShutdownInstanceDisks(self.lu, instance, ignore_primary=True):
7489
      raise errors.OpExecError("Can't shut down the instance's disks")
7490

    
7491
    instance.primary_node = target_node
7492
    # distribute new instance config to the other nodes
7493
    self.cfg.Update(instance, self.feedback_fn)
7494

    
7495
    # Only start the instance if it's marked as up
7496
    if instance.admin_up:
7497
      self.feedback_fn("* activating the instance's disks on target node %s" %
7498
                       target_node)
7499
      logging.info("Starting instance %s on node %s",
7500
                   instance.name, target_node)
7501

    
7502
      disks_ok, _ = _AssembleInstanceDisks(self.lu, instance,
7503
                                           ignore_secondaries=True)
7504
      if not disks_ok:
7505
        _ShutdownInstanceDisks(self.lu, instance)
7506
        raise errors.OpExecError("Can't activate the instance's disks")
7507

    
7508
      self.feedback_fn("* starting the instance on the target node %s" %
7509
                       target_node)
7510
      result = self.rpc.call_instance_start(target_node, instance, None, None,
7511
                                            False)
7512
      msg = result.fail_msg
7513
      if msg:
7514
        _ShutdownInstanceDisks(self.lu, instance)
7515
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
7516
                                 (instance.name, target_node, msg))
7517

    
7518
  def Exec(self, feedback_fn):
7519
    """Perform the migration.
7520

7521
    """
7522
    self.feedback_fn = feedback_fn
7523
    self.source_node = self.instance.primary_node
7524

    
7525
    # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
7526
    if self.instance.disk_template in constants.DTS_INT_MIRROR:
7527
      self.target_node = self.instance.secondary_nodes[0]
7528
      # Otherwise self.target_node has been populated either
7529
      # directly, or through an iallocator.
7530

    
7531
    self.all_nodes = [self.source_node, self.target_node]
7532
    self.nodes_ip = dict((name, node.secondary_ip) for (name, node)
7533
                         in self.cfg.GetMultiNodeInfo(self.all_nodes))
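    # map each node to its secondary IP (the address used for replication
    # traffic); the DRBD and migration RPC calls below use these addresses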
7534

    
7535
    if self.failover:
7536
      feedback_fn("Failover instance %s" % self.instance.name)
7537
      self._ExecFailover()
7538
    else:
7539
      feedback_fn("Migrating instance %s" % self.instance.name)
7540

    
7541
      if self.cleanup:
7542
        return self._ExecCleanup()
7543
      else:
7544
        return self._ExecMigration()
7545

    
7546

    
7547
def _CreateBlockDev(lu, node, instance, device, force_create,
7548
                    info, force_open):
7549
  """Create a tree of block devices on a given node.
7550

7551
  If this device type has to be created on secondaries, create it and
7552
  all its children.
7553

7554
  If not, just recurse to children keeping the same 'force' value.
7555

7556
  @param lu: the lu on whose behalf we execute
7557
  @param node: the node on which to create the device
7558
  @type instance: L{objects.Instance}
7559
  @param instance: the instance which owns the device
7560
  @type device: L{objects.Disk}
7561
  @param device: the device to create
7562
  @type force_create: boolean
7563
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device which has
      the CreateOnSecondary() attribute
7566
  @param info: the extra 'metadata' we should attach to the device
7567
      (this will be represented as a LVM tag)
7568
  @type force_open: boolean
7569
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution
7573

7574
  """
7575
  if device.CreateOnSecondary():
7576
    force_create = True
7577

    
7578
  if device.children:
7579
    for child in device.children:
7580
      _CreateBlockDev(lu, node, instance, child, force_create,
7581
                      info, force_open)
7582

    
7583
  if not force_create:
7584
    return
7585

    
7586
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
7587

    
7588

    
7589
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
7590
  """Create a single block device on a given node.
7591

7592
  This will not recurse over children of the device, so they must be
7593
  created in advance.
7594

7595
  @param lu: the lu on whose behalf we execute
7596
  @param node: the node on which to create the device
7597
  @type instance: L{objects.Instance}
7598
  @param instance: the instance which owns the device
7599
  @type device: L{objects.Disk}
7600
  @param device: the device to create
7601
  @param info: the extra 'metadata' we should attach to the device
7602
      (this will be represented as a LVM tag)
7603
  @type force_open: boolean
7604
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution
7608

7609
  """
7610
  lu.cfg.SetDiskID(device, node)
7611
  result = lu.rpc.call_blockdev_create(node, device, device.size,
7612
                                       instance.name, force_open, info)
7613
  result.Raise("Can't create block device %s on"
7614
               " node %s for instance %s" % (device, node, instance.name))
7615
  if device.physical_id is None:
7616
    device.physical_id = result.payload
7617

    
7618

    
7619
def _GenerateUniqueNames(lu, exts):
7620
  """Generate a suitable LV name.
7621

7622
  This will generate a logical volume name for the given instance.
7623

7624
  """
7625
  results = []
7626
  for val in exts:
7627
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
7628
    results.append("%s%s" % (new_id, val))
7629
  return results
7630

    
7631

    
7632
def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
7633
                         iv_name, p_minor, s_minor):
7634
  """Generate a drbd8 device complete with its children.
7635

7636
  """
7637
  assert len(vgnames) == len(names) == 2
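  # A DRBD8 disk consists of two LV children: a data LV of the requested
  # size and a fixed 128 MB metadata LV; the logical_id ties together the
  # two nodes, the allocated port, both minors and the shared secret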
7638
  port = lu.cfg.AllocatePort()
7639
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
7640
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
7641
                          logical_id=(vgnames[0], names[0]))
7642
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
7643
                          logical_id=(vgnames[1], names[1]))
7644
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
7645
                          logical_id=(primary, secondary, port,
7646
                                      p_minor, s_minor,
7647
                                      shared_secret),
7648
                          children=[dev_data, dev_meta],
7649
                          iv_name=iv_name)
7650
  return drbd_dev
7651

    
7652

    
7653
def _GenerateDiskTemplate(lu, template_name,
7654
                          instance_name, primary_node,
7655
                          secondary_nodes, disk_info,
7656
                          file_storage_dir, file_driver,
7657
                          base_index, feedback_fn):
7658
  """Generate the entire disk layout for a given template type.
7659

7660
  """
7661
  #TODO: compute space requirements
7662

    
7663
  vgname = lu.cfg.GetVGName()
7664
  disk_count = len(disk_info)
7665
  disks = []
7666
  if template_name == constants.DT_DISKLESS:
7667
    pass
7668
  elif template_name == constants.DT_PLAIN:
7669
    if len(secondary_nodes) != 0:
7670
      raise errors.ProgrammerError("Wrong template configuration")
7671

    
7672
    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
7673
                                      for i in range(disk_count)])
7674
    for idx, disk in enumerate(disk_info):
7675
      disk_index = idx + base_index
7676
      vg = disk.get(constants.IDISK_VG, vgname)
7677
      feedback_fn("* disk %i, vg %s, name %s" % (idx, vg, names[idx]))
7678
      disk_dev = objects.Disk(dev_type=constants.LD_LV,
7679
                              size=disk[constants.IDISK_SIZE],
7680
                              logical_id=(vg, names[idx]),
7681
                              iv_name="disk/%d" % disk_index,
7682
                              mode=disk[constants.IDISK_MODE])
7683
      disks.append(disk_dev)
7684
  elif template_name == constants.DT_DRBD8:
7685
    if len(secondary_nodes) != 1:
7686
      raise errors.ProgrammerError("Wrong template configuration")
7687
    remote_node = secondary_nodes[0]
7688
    minors = lu.cfg.AllocateDRBDMinor(
7689
      [primary_node, remote_node] * len(disk_info), instance_name)
7690

    
7691
    names = []
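    # each DRBD disk needs two logical volumes (and therefore two generated
    # names): one for the data and one for the DRBD metadata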
7692
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
7693
                                               for i in range(disk_count)]):
7694
      names.append(lv_prefix + "_data")
7695
      names.append(lv_prefix + "_meta")
7696
    for idx, disk in enumerate(disk_info):
7697
      disk_index = idx + base_index
7698
      data_vg = disk.get(constants.IDISK_VG, vgname)
7699
      meta_vg = disk.get(constants.IDISK_METAVG, data_vg)
7700
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
7701
                                      disk[constants.IDISK_SIZE],
7702
                                      [data_vg, meta_vg],
7703
                                      names[idx * 2:idx * 2 + 2],
7704
                                      "disk/%d" % disk_index,
7705
                                      minors[idx * 2], minors[idx * 2 + 1])
7706
      disk_dev.mode = disk[constants.IDISK_MODE]
7707
      disks.append(disk_dev)
7708
  elif template_name == constants.DT_FILE:
7709
    if len(secondary_nodes) != 0:
7710
      raise errors.ProgrammerError("Wrong template configuration")
7711

    
7712
    opcodes.RequireFileStorage()
7713

    
7714
    for idx, disk in enumerate(disk_info):
7715
      disk_index = idx + base_index
7716
      disk_dev = objects.Disk(dev_type=constants.LD_FILE,
7717
                              size=disk[constants.IDISK_SIZE],
7718
                              iv_name="disk/%d" % disk_index,
7719
                              logical_id=(file_driver,
7720
                                          "%s/disk%d" % (file_storage_dir,
7721
                                                         disk_index)),
7722
                              mode=disk[constants.IDISK_MODE])
7723
      disks.append(disk_dev)
7724
  elif template_name == constants.DT_SHARED_FILE:
7725
    if len(secondary_nodes) != 0:
7726
      raise errors.ProgrammerError("Wrong template configuration")
7727

    
7728
    opcodes.RequireSharedFileStorage()
7729

    
7730
    for idx, disk in enumerate(disk_info):
7731
      disk_index = idx + base_index
7732
      disk_dev = objects.Disk(dev_type=constants.LD_FILE,
7733
                              size=disk[constants.IDISK_SIZE],
7734
                              iv_name="disk/%d" % disk_index,
7735
                              logical_id=(file_driver,
7736
                                          "%s/disk%d" % (file_storage_dir,
7737
                                                         disk_index)),
7738
                              mode=disk[constants.IDISK_MODE])
7739
      disks.append(disk_dev)
7740
  elif template_name == constants.DT_BLOCK:
7741
    if len(secondary_nodes) != 0:
7742
      raise errors.ProgrammerError("Wrong template configuration")
7743

    
7744
    for idx, disk in enumerate(disk_info):
7745
      disk_index = idx + base_index
7746
      disk_dev = objects.Disk(dev_type=constants.LD_BLOCKDEV,
7747
                              size=disk[constants.IDISK_SIZE],
7748
                              logical_id=(constants.BLOCKDEV_DRIVER_MANUAL,
7749
                                          disk[constants.IDISK_ADOPT]),
7750
                              iv_name="disk/%d" % disk_index,
7751
                              mode=disk[constants.IDISK_MODE])
7752
      disks.append(disk_dev)
7753

    
7754
  else:
7755
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
7756
  return disks
7757

    
7758

    
7759
def _GetInstanceInfoText(instance):
7760
  """Compute that text that should be added to the disk's metadata.
7761

7762
  """
7763
  return "originstname+%s" % instance.name
7764

    
7765

    
7766
def _CalcEta(time_taken, written, total_size):
7767
  """Calculates the ETA based on size written and total size.
7768

7769
  @param time_taken: The time taken so far
7770
  @param written: amount written so far
7771
  @param total_size: The total size of data to be written
7772
  @return: The remaining time in seconds
7773

7774
  """
7775
  avg_time = time_taken / float(written)
7776
  return (total_size - written) * avg_time
7777

    
7778

    
7779
def _WipeDisks(lu, instance):
7780
  """Wipes instance disks.
7781

7782
  @type lu: L{LogicalUnit}
7783
  @param lu: the logical unit on whose behalf we execute
7784
  @type instance: L{objects.Instance}
7785
  @param instance: the instance whose disks we should create
7786
  @return: the success of the wipe
7787

7788
  """
7789
  node = instance.primary_node
7790

    
7791
  for device in instance.disks:
7792
    lu.cfg.SetDiskID(device, node)
7793

    
7794
  logging.info("Pause sync of instance %s disks", instance.name)
7795
  result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)
7796

    
7797
  for idx, success in enumerate(result.payload):
7798
    if not success:
7799
      logging.warn("pause-sync of instance %s for disks %d failed",
7800
                   instance.name, idx)
7801

    
7802
  try:
7803
    for idx, device in enumerate(instance.disks):
7804
      # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
7805
      # MAX_WIPE_CHUNK at max
7806
      wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
7807
                            constants.MIN_WIPE_CHUNK_PERCENT)
7808
      # we _must_ make this an int, otherwise rounding errors will
7809
      # occur
7810
      wipe_chunk_size = int(wipe_chunk_size)
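      # e.g. with a minimum wipe percentage of 10, a 50 GiB disk would be
      # wiped in 5 GiB chunks, unless MAX_WIPE_CHUNK imposes a smaller cap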
7811

    
7812
      lu.LogInfo("* Wiping disk %d", idx)
7813
      logging.info("Wiping disk %d for instance %s, node %s using"
7814
                   " chunk size %s", idx, instance.name, node, wipe_chunk_size)
7815

    
7816
      offset = 0
7817
      size = device.size
7818
      last_output = 0
7819
      start_time = time.time()
7820

    
7821
      while offset < size:
7822
        wipe_size = min(wipe_chunk_size, size - offset)
7823
        logging.debug("Wiping disk %d, offset %s, chunk %s",
7824
                      idx, offset, wipe_size)
7825
        result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
7826
        result.Raise("Could not wipe disk %d at offset %d for size %d" %
7827
                     (idx, offset, wipe_size))
7828
        now = time.time()
7829
        offset += wipe_size
7830
        if now - last_output >= 60:
7831
          eta = _CalcEta(now - start_time, offset, size)
7832
          lu.LogInfo(" - done: %.1f%% ETA: %s" %
7833
                     (offset / float(size) * 100, utils.FormatSeconds(eta)))
7834
          last_output = now
7835
  finally:
7836
    logging.info("Resume sync of instance %s disks", instance.name)
7837

    
7838
    result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, False)
7839

    
7840
    for idx, success in enumerate(result.payload):
7841
      if not success:
7842
        lu.LogWarning("Resume sync of disk %d failed, please have a"
7843
                      " look at the status and troubleshoot the issue", idx)
7844
        logging.warn("resume-sync of instance %s for disks %d failed",
7845
                     instance.name, idx)
7846

    
7847

    
7848
def _CreateDisks(lu, instance, to_skip=None, target_node=None):
7849
  """Create all disks for an instance.
7850

7851
  This abstracts away some work from AddInstance.
7852

7853
  @type lu: L{LogicalUnit}
7854
  @param lu: the logical unit on whose behalf we execute
7855
  @type instance: L{objects.Instance}
7856
  @param instance: the instance whose disks we should create
7857
  @type to_skip: list
7858
  @param to_skip: list of indices to skip
7859
  @type target_node: string
7860
  @param target_node: if passed, overrides the target node for creation
7861
  @rtype: boolean
7862
  @return: the success of the creation
7863

7864
  """
7865
  info = _GetInstanceInfoText(instance)
7866
  if target_node is None:
7867
    pnode = instance.primary_node
7868
    all_nodes = instance.all_nodes
7869
  else:
7870
    pnode = target_node
7871
    all_nodes = [pnode]
7872

    
7873
  if instance.disk_template in constants.DTS_FILEBASED:
7874
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
7875
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
7876

    
7877
    result.Raise("Failed to create directory '%s' on"
7878
                 " node %s" % (file_storage_dir, pnode))
7879

    
7880
  # Note: this needs to be kept in sync with adding of disks in
7881
  # LUInstanceSetParams
7882
  for idx, device in enumerate(instance.disks):
7883
    if to_skip and idx in to_skip:
7884
      continue
7885
    logging.info("Creating volume %s for instance %s",
7886
                 device.iv_name, instance.name)
7887
    #HARDCODE
7888
    for node in all_nodes:
7889
      f_create = node == pnode
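      # creation and opening are only forced on the primary node; on the
      # other nodes the device is created only if its type requires it
      # (see CreateOnSecondary)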
7890
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
7891

    
7892

    
7893
def _RemoveDisks(lu, instance, target_node=None):
7894
  """Remove all disks for an instance.
7895

7896
  This abstracts away some work from `AddInstance()` and
7897
  `RemoveInstance()`. Note that in case some of the devices couldn't
7898
  be removed, the removal will continue with the other ones (compare
7899
  with `_CreateDisks()`).
7900

7901
  @type lu: L{LogicalUnit}
7902
  @param lu: the logical unit on whose behalf we execute
7903
  @type instance: L{objects.Instance}
7904
  @param instance: the instance whose disks we should remove
7905
  @type target_node: string
7906
  @param target_node: used to override the node on which to remove the disks
7907
  @rtype: boolean
7908
  @return: the success of the removal
7909

7910
  """
7911
  logging.info("Removing block devices for instance %s", instance.name)
7912

    
7913
  all_result = True
7914
  for device in instance.disks:
7915
    if target_node:
7916
      edata = [(target_node, device)]
7917
    else:
7918
      edata = device.ComputeNodeTree(instance.primary_node)
7919
    for node, disk in edata:
7920
      lu.cfg.SetDiskID(disk, node)
7921
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
7922
      if msg:
7923
        lu.LogWarning("Could not remove block device %s on node %s,"
7924
                      " continuing anyway: %s", device.iv_name, node, msg)
7925
        all_result = False
7926

    
7927
  if instance.disk_template == constants.DT_FILE:
7928
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
7929
    if target_node:
7930
      tgt = target_node
7931
    else:
7932
      tgt = instance.primary_node
7933
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
7934
    if result.fail_msg:
7935
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
7936
                    file_storage_dir, instance.primary_node, result.fail_msg)
7937
      all_result = False
7938

    
7939
  return all_result
7940

    
7941

    
7942
def _ComputeDiskSizePerVG(disk_template, disks):
7943
  """Compute disk size requirements in the volume group
7944

7945
  """
7946
  def _compute(disks, payload):
7947
    """Universal algorithm.
7948

7949
    """
7950
    vgs = {}
7951
    for disk in disks:
7952
      vg = disk[constants.IDISK_VG]
      vgs[vg] = vgs.get(vg, 0) + disk[constants.IDISK_SIZE] + payload
7954

    
7955
    return vgs
7956

    
7957
  # Required free disk space as a function of disk and swap space
7958
  req_size_dict = {
7959
    constants.DT_DISKLESS: {},
7960
    constants.DT_PLAIN: _compute(disks, 0),
7961
    # 128 MB are added for drbd metadata for each disk
7962
    constants.DT_DRBD8: _compute(disks, 128),
7963
    constants.DT_FILE: {},
7964
    constants.DT_SHARED_FILE: {},
7965
  }
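  # Example: two 1024 MB disks in the same volume group with the DRBD8
  # template require 2 * (1024 + 128) = 2304 MB in that volume group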
7966

    
7967
  if disk_template not in req_size_dict:
7968
    raise errors.ProgrammerError("Disk template '%s' size requirement"
7969
                                 " is unknown" % disk_template)
7970

    
7971
  return req_size_dict[disk_template]
7972

    
7973

    
7974
def _ComputeDiskSize(disk_template, disks):
7975
  """Compute disk size requirements in the volume group
7976

7977
  """
7978
  # Required free disk space as a function of disk and swap space
7979
  req_size_dict = {
7980
    constants.DT_DISKLESS: None,
7981
    constants.DT_PLAIN: sum(d[constants.IDISK_SIZE] for d in disks),
7982
    # 128 MB are added for drbd metadata for each disk
7983
    constants.DT_DRBD8: sum(d[constants.IDISK_SIZE] + 128 for d in disks),
7984
    constants.DT_FILE: None,
7985
    constants.DT_SHARED_FILE: 0,
7986
    constants.DT_BLOCK: 0,
7987
  }
7988

    
7989
  if disk_template not in req_size_dict:
7990
    raise errors.ProgrammerError("Disk template '%s' size requirement"
7991
                                 " is unknown" % disk_template)
7992

    
7993
  return req_size_dict[disk_template]
7994

    
7995

    
7996
def _FilterVmNodes(lu, nodenames):
7997
  """Filters out non-vm_capable nodes from a list.
7998

7999
  @type lu: L{LogicalUnit}
8000
  @param lu: the logical unit for which we check
8001
  @type nodenames: list
8002
  @param nodenames: the list of nodes on which we should check
8003
  @rtype: list
8004
  @return: the list of vm-capable nodes
8005

8006
  """
8007
  non_vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
  return [name for name in nodenames if name not in non_vm_nodes]
8009

    
8010

    
8011
def _CheckHVParams(lu, nodenames, hvname, hvparams):
8012
  """Hypervisor parameter validation.
8013

8014
  This function abstract the hypervisor parameter validation to be
8015
  used in both instance create and instance modify.
8016

8017
  @type lu: L{LogicalUnit}
8018
  @param lu: the logical unit for which we check
8019
  @type nodenames: list
8020
  @param nodenames: the list of nodes on which we should check
8021
  @type hvname: string
8022
  @param hvname: the name of the hypervisor we should use
8023
  @type hvparams: dict
8024
  @param hvparams: the parameters which we need to check
8025
  @raise errors.OpPrereqError: if the parameters are not valid
8026

8027
  """
8028
  nodenames = _FilterVmNodes(lu, nodenames)
8029
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
8030
                                                  hvname,
8031
                                                  hvparams)
8032
  for node in nodenames:
8033
    info = hvinfo[node]
8034
    if info.offline:
8035
      continue
8036
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
8037

    
8038

    
8039
def _CheckOSParams(lu, required, nodenames, osname, osparams):
8040
  """OS parameters validation.
8041

8042
  @type lu: L{LogicalUnit}
8043
  @param lu: the logical unit for which we check
8044
  @type required: boolean
8045
  @param required: whether the validation should fail if the OS is not
8046
      found
8047
  @type nodenames: list
8048
  @param nodenames: the list of nodes on which we should check
8049
  @type osname: string
8050
  @param osname: the name of the OS we should use
8051
  @type osparams: dict
8052
  @param osparams: the parameters which we need to check
8053
  @raise errors.OpPrereqError: if the parameters are not valid
8054

8055
  """
8056
  nodenames = _FilterVmNodes(lu, nodenames)
8057
  result = lu.rpc.call_os_validate(required, nodenames, osname,
8058
                                   [constants.OS_VALIDATE_PARAMETERS],
8059
                                   osparams)
8060
  for node, nres in result.items():
8061
    # we don't check for offline cases since this should be run only
8062
    # against the master node and/or an instance's nodes
8063
    nres.Raise("OS Parameters validation failed on node %s" % node)
8064
    if not nres.payload:
8065
      lu.LogInfo("OS %s not found on node %s, validation skipped",
8066
                 osname, node)
8067

    
8068

    
8069
class LUInstanceCreate(LogicalUnit):
8070
  """Create an instance.
8071

8072
  """
8073
  HPATH = "instance-add"
8074
  HTYPE = constants.HTYPE_INSTANCE
8075
  REQ_BGL = False
8076

    
8077
  def CheckArguments(self):
8078
    """Check arguments.
8079

8080
    """
8081
    # do not require name_check to ease forward/backward compatibility
8082
    # for tools
8083
    if self.op.no_install and self.op.start:
8084
      self.LogInfo("No-installation mode selected, disabling startup")
8085
      self.op.start = False
8086
    # validate/normalize the instance name
8087
    self.op.instance_name = \
8088
      netutils.Hostname.GetNormalizedName(self.op.instance_name)
8089

    
8090
    if self.op.ip_check and not self.op.name_check:
8091
      # TODO: make the ip check more flexible and not depend on the name check
8092
      raise errors.OpPrereqError("Cannot do IP address check without a name"
8093
                                 " check", errors.ECODE_INVAL)
8094

    
8095
    # check nics' parameter names
8096
    for nic in self.op.nics:
8097
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
8098

    
8099
    # check disks. parameter names and consistent adopt/no-adopt strategy
8100
    has_adopt = has_no_adopt = False
8101
    for disk in self.op.disks:
8102
      utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
8103
      if constants.IDISK_ADOPT in disk:
8104
        has_adopt = True
8105
      else:
8106
        has_no_adopt = True
8107
    if has_adopt and has_no_adopt:
8108
      raise errors.OpPrereqError("Either all disks are adopted or none is",
8109
                                 errors.ECODE_INVAL)
8110
    if has_adopt:
8111
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
8112
        raise errors.OpPrereqError("Disk adoption is not supported for the"
8113
                                   " '%s' disk template" %
8114
                                   self.op.disk_template,
8115
                                   errors.ECODE_INVAL)
8116
      if self.op.iallocator is not None:
8117
        raise errors.OpPrereqError("Disk adoption not allowed with an"
8118
                                   " iallocator script", errors.ECODE_INVAL)
8119
      if self.op.mode == constants.INSTANCE_IMPORT:
8120
        raise errors.OpPrereqError("Disk adoption not allowed for"
8121
                                   " instance import", errors.ECODE_INVAL)
8122
    else:
8123
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
8124
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
8125
                                   " but no 'adopt' parameter given" %
8126
                                   self.op.disk_template,
8127
                                   errors.ECODE_INVAL)
8128

    
8129
    self.adopt_disks = has_adopt
8130

    
8131
    # instance name verification
8132
    if self.op.name_check:
8133
      self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
8134
      self.op.instance_name = self.hostname1.name
8135
      # used in CheckPrereq for ip ping check
8136
      self.check_ip = self.hostname1.ip
8137
    else:
8138
      self.check_ip = None
8139

    
8140
    # file storage checks
8141
    if (self.op.file_driver and
8142
        not self.op.file_driver in constants.FILE_DRIVER):
8143
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
8144
                                 self.op.file_driver, errors.ECODE_INVAL)
8145

    
8146
    if self.op.disk_template == constants.DT_FILE:
8147
      opcodes.RequireFileStorage()
8148
    elif self.op.disk_template == constants.DT_SHARED_FILE:
8149
      opcodes.RequireSharedFileStorage()
8150

    
8151
    ### Node/iallocator related checks
8152
    _CheckIAllocatorOrNode(self, "iallocator", "pnode")
8153

    
8154
    if self.op.pnode is not None:
8155
      if self.op.disk_template in constants.DTS_INT_MIRROR:
8156
        if self.op.snode is None:
8157
          raise errors.OpPrereqError("The networked disk templates need"
8158
                                     " a mirror node", errors.ECODE_INVAL)
8159
      elif self.op.snode:
8160
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
8161
                        " template")
8162
        self.op.snode = None
8163

    
8164
    self._cds = _GetClusterDomainSecret()
8165

    
8166
    if self.op.mode == constants.INSTANCE_IMPORT:
8167
      # On import force_variant must be True, because if we forced it at
8168
      # initial install, our only chance when importing it back is that it
8169
      # works again!
8170
      self.op.force_variant = True
8171

    
8172
      if self.op.no_install:
8173
        self.LogInfo("No-installation mode has no effect during import")
8174

    
8175
    elif self.op.mode == constants.INSTANCE_CREATE:
8176
      if self.op.os_type is None:
8177
        raise errors.OpPrereqError("No guest OS specified",
8178
                                   errors.ECODE_INVAL)
8179
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
8180
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
8181
                                   " installation" % self.op.os_type,
8182
                                   errors.ECODE_STATE)
8183
      if self.op.disk_template is None:
8184
        raise errors.OpPrereqError("No disk template specified",
8185
                                   errors.ECODE_INVAL)
8186

    
8187
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
8188
      # Check handshake to ensure both clusters have the same domain secret
8189
      src_handshake = self.op.source_handshake
8190
      if not src_handshake:
8191
        raise errors.OpPrereqError("Missing source handshake",
8192
                                   errors.ECODE_INVAL)
8193

    
8194
      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
8195
                                                           src_handshake)
8196
      if errmsg:
8197
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
8198
                                   errors.ECODE_INVAL)
8199

    
8200
      # Load and check source CA
8201
      self.source_x509_ca_pem = self.op.source_x509_ca
8202
      if not self.source_x509_ca_pem:
8203
        raise errors.OpPrereqError("Missing source X509 CA",
8204
                                   errors.ECODE_INVAL)
8205

    
8206
      try:
8207
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
8208
                                                    self._cds)
8209
      except OpenSSL.crypto.Error, err:
8210
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
8211
                                   (err, ), errors.ECODE_INVAL)
8212

    
8213
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
8214
      if errcode is not None:
8215
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
8216
                                   errors.ECODE_INVAL)
8217

    
8218
      self.source_x509_ca = cert
8219

    
8220
      src_instance_name = self.op.source_instance_name
8221
      if not src_instance_name:
8222
        raise errors.OpPrereqError("Missing source instance name",
8223
                                   errors.ECODE_INVAL)
8224

    
8225
      self.source_instance_name = \
8226
          netutils.GetHostname(name=src_instance_name).name
8227

    
8228
    else:
8229
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
8230
                                 self.op.mode, errors.ECODE_INVAL)
8231

    
8232
  def ExpandNames(self):
8233
    """ExpandNames for CreateInstance.
8234

8235
    Figure out the right locks for instance creation.
8236

8237
    """
8238
    self.needed_locks = {}
8239

    
8240
    instance_name = self.op.instance_name
8241
    # this is just a preventive check, but someone might still add this
8242
    # instance in the meantime, and creation will fail at lock-add time
8243
    if instance_name in self.cfg.GetInstanceList():
8244
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
8245
                                 instance_name, errors.ECODE_EXISTS)
8246

    
8247
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
8248

    
8249
    if self.op.iallocator:
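      # with an iallocator the target node(s) are not known yet, so all
      # nodes need to be locked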
8250
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8251
    else:
8252
      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
8253
      nodelist = [self.op.pnode]
8254
      if self.op.snode is not None:
8255
        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
8256
        nodelist.append(self.op.snode)
8257
      self.needed_locks[locking.LEVEL_NODE] = nodelist
8258

    
8259
    # in case of import lock the source node too
8260
    if self.op.mode == constants.INSTANCE_IMPORT:
8261
      src_node = self.op.src_node
8262
      src_path = self.op.src_path
8263

    
8264
      if src_path is None:
8265
        self.op.src_path = src_path = self.op.instance_name
8266

    
8267
      if src_node is None:
8268
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8269
        self.op.src_node = None
8270
        if os.path.isabs(src_path):
8271
          raise errors.OpPrereqError("Importing an instance from a path"
8272
                                     " requires a source node option",
8273
                                     errors.ECODE_INVAL)
8274
      else:
8275
        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
8276
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
8277
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
8278
        if not os.path.isabs(src_path):
8279
          self.op.src_path = src_path = \
8280
            utils.PathJoin(constants.EXPORT_DIR, src_path)
8281

    
8282
  def _RunAllocator(self):
8283
    """Run the allocator based on input opcode.
8284

8285
    """
8286
    nics = [n.ToDict() for n in self.nics]
8287
    ial = IAllocator(self.cfg, self.rpc,
8288
                     mode=constants.IALLOCATOR_MODE_ALLOC,
8289
                     name=self.op.instance_name,
8290
                     disk_template=self.op.disk_template,
8291
                     tags=self.op.tags,
8292
                     os=self.op.os_type,
8293
                     vcpus=self.be_full[constants.BE_VCPUS],
8294
                     memory=self.be_full[constants.BE_MEMORY],
8295
                     disks=self.disks,
8296
                     nics=nics,
8297
                     hypervisor=self.op.hypervisor,
8298
                     )
8299

    
8300
    ial.Run(self.op.iallocator)
8301

    
8302
    if not ial.success:
8303
      raise errors.OpPrereqError("Can't compute nodes using"
8304
                                 " iallocator '%s': %s" %
8305
                                 (self.op.iallocator, ial.info),
8306
                                 errors.ECODE_NORES)
8307
    if len(ial.result) != ial.required_nodes:
8308
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
8309
                                 " of nodes (%s), required %s" %
8310
                                 (self.op.iallocator, len(ial.result),
8311
                                  ial.required_nodes), errors.ECODE_FAULT)
8312
    self.op.pnode = ial.result[0]
8313
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
8314
                 self.op.instance_name, self.op.iallocator,
8315
                 utils.CommaJoin(ial.result))
8316
    if ial.required_nodes == 2:
8317
      self.op.snode = ial.result[1]
8318

    
8319
  def BuildHooksEnv(self):
8320
    """Build hooks env.
8321

8322
    This runs on master, primary and secondary nodes of the instance.
8323

8324
    """
8325
    env = {
8326
      "ADD_MODE": self.op.mode,
8327
      }
8328
    if self.op.mode == constants.INSTANCE_IMPORT:
8329
      env["SRC_NODE"] = self.op.src_node
8330
      env["SRC_PATH"] = self.op.src_path
8331
      env["SRC_IMAGES"] = self.src_images
8332

    
8333
    env.update(_BuildInstanceHookEnv(
8334
      name=self.op.instance_name,
8335
      primary_node=self.op.pnode,
8336
      secondary_nodes=self.secondaries,
8337
      status=self.op.start,
8338
      os_type=self.op.os_type,
8339
      memory=self.be_full[constants.BE_MEMORY],
8340
      vcpus=self.be_full[constants.BE_VCPUS],
8341
      nics=_NICListToTuple(self, self.nics),
8342
      disk_template=self.op.disk_template,
8343
      disks=[(d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
8344
             for d in self.disks],
8345
      bep=self.be_full,
8346
      hvp=self.hv_full,
8347
      hypervisor_name=self.op.hypervisor,
8348
      tags=self.op.tags,
8349
    ))
8350

    
8351
    return env
8352

    
8353
  def BuildHooksNodes(self):
8354
    """Build hooks nodes.
8355

8356
    """
8357
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
8358
    return nl, nl
8359

    
8360
  def _ReadExportInfo(self):
8361
    """Reads the export information from disk.
8362

8363
    It will override the opcode source node and path with the actual
8364
    information, if these two were not specified before.
8365

8366
    @return: the export information
8367

8368
    """
8369
    assert self.op.mode == constants.INSTANCE_IMPORT
8370

    
8371
    src_node = self.op.src_node
8372
    src_path = self.op.src_path
8373

    
8374
    if src_node is None:
8375
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
8376
      exp_list = self.rpc.call_export_list(locked_nodes)
8377
      found = False
8378
      for node in exp_list:
8379
        if exp_list[node].fail_msg:
8380
          continue
8381
        if src_path in exp_list[node].payload:
8382
          found = True
8383
          self.op.src_node = src_node = node
8384
          self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
8385
                                                       src_path)
8386
          break
8387
      if not found:
8388
        raise errors.OpPrereqError("No export found for relative path %s" %
8389
                                    src_path, errors.ECODE_INVAL)
8390

    
8391
    _CheckNodeOnline(self, src_node)
8392
    result = self.rpc.call_export_info(src_node, src_path)
8393
    result.Raise("No export or invalid export found in dir %s" % src_path)
8394

    
8395
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
8396
    if not export_info.has_section(constants.INISECT_EXP):
8397
      raise errors.ProgrammerError("Corrupted export config",
8398
                                   errors.ECODE_ENVIRON)
8399

    
8400
    ei_version = export_info.get(constants.INISECT_EXP, "version")
8401
    if int(ei_version) != constants.EXPORT_VERSION:
8402
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
8403
                                 (ei_version, constants.EXPORT_VERSION),
8404
                                 errors.ECODE_ENVIRON)
8405
    return export_info
8406

    
8407
  def _ReadExportParams(self, einfo):
8408
    """Use export parameters as defaults.
8409

8410
    If the opcode doesn't specify (i.e. override) some instance
8411
    parameters, try to take them from the export information, if
8412
    it declares them.
8413

8414
    """
8415
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
8416

    
8417
    if self.op.disk_template is None:
8418
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
8419
        self.op.disk_template = einfo.get(constants.INISECT_INS,
8420
                                          "disk_template")
8421
      else:
8422
        raise errors.OpPrereqError("No disk template specified and the export"
8423
                                   " is missing the disk_template information",
8424
                                   errors.ECODE_INVAL)
8425

    
8426
    if not self.op.disks:
8427
      if einfo.has_option(constants.INISECT_INS, "disk_count"):
8428
        disks = []
8429
        # TODO: import the disk iv_name too
8430
        for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
8431
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
8432
          disks.append({constants.IDISK_SIZE: disk_sz})
8433
        self.op.disks = disks
8434
      else:
8435
        raise errors.OpPrereqError("No disk info specified and the export"
8436
                                   " is missing the disk information",
8437
                                   errors.ECODE_INVAL)
8438

    
8439
    if (not self.op.nics and
8440
        einfo.has_option(constants.INISECT_INS, "nic_count")):
8441
      nics = []
8442
      for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
8443
        ndict = {}
8444
        for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
8445
          v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
8446
          ndict[name] = v
8447
        nics.append(ndict)
8448
      self.op.nics = nics
8449

    
8450
    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
8451
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
8452

    
8453
    if (self.op.hypervisor is None and
8454
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
8455
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
8456

    
8457
    if einfo.has_section(constants.INISECT_HYP):
8458
      # use the export parameters but do not override the ones
8459
      # specified by the user
8460
      for name, value in einfo.items(constants.INISECT_HYP):
8461
        if name not in self.op.hvparams:
8462
          self.op.hvparams[name] = value
8463

    
8464
    if einfo.has_section(constants.INISECT_BEP):
8465
      # use the parameters, without overriding
8466
      for name, value in einfo.items(constants.INISECT_BEP):
8467
        if name not in self.op.beparams:
8468
          self.op.beparams[name] = value
8469
    else:
8470
      # try to read the parameters old style, from the main section
8471
      for name in constants.BES_PARAMETERS:
8472
        if (name not in self.op.beparams and
8473
            einfo.has_option(constants.INISECT_INS, name)):
8474
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
8475

    
8476
    if einfo.has_section(constants.INISECT_OSP):
8477
      # use the parameters, without overriding
8478
      for name, value in einfo.items(constants.INISECT_OSP):
8479
        if name not in self.op.osparams:
8480
          self.op.osparams[name] = value
8481

    
8482
  def _RevertToDefaults(self, cluster):
8483
    """Revert the instance parameters to the default values.
8484

8485
    """
8486
    # hvparams
8487
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
8488
    for name in self.op.hvparams.keys():
8489
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
8490
        del self.op.hvparams[name]
8491
    # beparams
8492
    be_defs = cluster.SimpleFillBE({})
8493
    for name in self.op.beparams.keys():
8494
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
8495
        del self.op.beparams[name]
8496
    # nic params
8497
    nic_defs = cluster.SimpleFillNIC({})
8498
    for nic in self.op.nics:
8499
      for name in constants.NICS_PARAMETERS:
8500
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
8501
          del nic[name]
8502
    # osparams
8503
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
8504
    for name in self.op.osparams.keys():
8505
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
8506
        del self.op.osparams[name]
8507

    
8508
  def _CalculateFileStorageDir(self):
8509
    """Calculate final instance file storage dir.
8510

8511
    """
8512
    # file storage dir calculation/check
8513
    self.instance_file_storage_dir = None
8514
    if self.op.disk_template in constants.DTS_FILEBASED:
8515
      # build the full file storage dir path
8516
      joinargs = []
8517

    
8518
      if self.op.disk_template == constants.DT_SHARED_FILE:
8519
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
8520
      else:
8521
        get_fsd_fn = self.cfg.GetFileStorageDir
8522

    
8523
      cfg_storagedir = get_fsd_fn()
8524
      if not cfg_storagedir:
8525
        raise errors.OpPrereqError("Cluster file storage dir not defined")
8526
      joinargs.append(cfg_storagedir)
8527

    
8528
      if self.op.file_storage_dir is not None:
8529
        joinargs.append(self.op.file_storage_dir)
8530

    
8531
      joinargs.append(self.op.instance_name)
8532

    
8533
      # pylint: disable=W0142
8534
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
8535

    
8536
  def CheckPrereq(self):
8537
    """Check prerequisites.
8538

8539
    """
8540
    self._CalculateFileStorageDir()
8541

    
8542
    if self.op.mode == constants.INSTANCE_IMPORT:
8543
      export_info = self._ReadExportInfo()
8544
      self._ReadExportParams(export_info)
8545

    
8546
    if (not self.cfg.GetVGName() and
8547
        self.op.disk_template not in constants.DTS_NOT_LVM):
8548
      raise errors.OpPrereqError("Cluster does not support lvm-based"
8549
                                 " instances", errors.ECODE_STATE)
8550

    
8551
    if self.op.hypervisor is None:
8552
      self.op.hypervisor = self.cfg.GetHypervisorType()
8553

    
8554
    cluster = self.cfg.GetClusterInfo()
8555
    enabled_hvs = cluster.enabled_hypervisors
8556
    if self.op.hypervisor not in enabled_hvs:
8557
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
8558
                                 " cluster (%s)" % (self.op.hypervisor,
8559
                                  ",".join(enabled_hvs)),
8560
                                 errors.ECODE_STATE)
8561

    
8562
    # Check tag validity
8563
    for tag in self.op.tags:
8564
      objects.TaggableObject.ValidateTag(tag)
8565

    
8566
    # check hypervisor parameter syntax (locally)
8567
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
8568
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
8569
                                      self.op.hvparams)
8570
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
8571
    hv_type.CheckParameterSyntax(filled_hvp)
8572
    self.hv_full = filled_hvp
8573
    # check that we don't specify global parameters on an instance
8574
    _CheckGlobalHvParams(self.op.hvparams)
8575

    
8576
    # fill and remember the beparams dict
8577
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
8578
    self.be_full = cluster.SimpleFillBE(self.op.beparams)
8579

    
8580
    # build os parameters
8581
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
8582

    
8583
    # now that hvp/bep are in final format, let's reset to defaults,
8584
    # if told to do so
8585
    if self.op.identify_defaults:
8586
      self._RevertToDefaults(cluster)
8587

    
8588
    # NIC buildup
8589
    self.nics = []
8590
    for idx, nic in enumerate(self.op.nics):
8591
      nic_mode_req = nic.get(constants.INIC_MODE, None)
8592
      nic_mode = nic_mode_req
8593
      if nic_mode is None:
8594
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
8595

    
8596
      # in routed mode, for the first nic, the default ip is 'auto'
8597
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
8598
        default_ip_mode = constants.VALUE_AUTO
8599
      else:
8600
        default_ip_mode = constants.VALUE_NONE
8601

    
8602
      # ip validity checks
8603
      ip = nic.get(constants.INIC_IP, default_ip_mode)
8604
      if ip is None or ip.lower() == constants.VALUE_NONE:
8605
        nic_ip = None
8606
      elif ip.lower() == constants.VALUE_AUTO:
8607
        if not self.op.name_check:
8608
          raise errors.OpPrereqError("IP address set to auto but name checks"
8609
                                     " have been skipped",
8610
                                     errors.ECODE_INVAL)
8611
        nic_ip = self.hostname1.ip
8612
      else:
8613
        if not netutils.IPAddress.IsValid(ip):
8614
          raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
8615
                                     errors.ECODE_INVAL)
8616
        nic_ip = ip
8617

    
8618
      # TODO: check the ip address for uniqueness
8619
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
8620
        raise errors.OpPrereqError("Routed nic mode requires an ip address",
8621
                                   errors.ECODE_INVAL)
8622

    
8623
      # MAC address verification
8624
      mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
8625
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8626
        mac = utils.NormalizeAndValidateMac(mac)
8627

    
8628
        try:
8629
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
8630
        except errors.ReservationError:
8631
          raise errors.OpPrereqError("MAC address %s already in use"
8632
                                     " in cluster" % mac,
8633
                                     errors.ECODE_NOTUNIQUE)
8634

    
8635
      #  Build nic parameters
8636
      link = nic.get(constants.INIC_LINK, None)
8637
      nicparams = {}
8638
      if nic_mode_req:
8639
        nicparams[constants.NIC_MODE] = nic_mode_req
8640
      if link:
8641
        nicparams[constants.NIC_LINK] = link
8642

    
8643
      check_params = cluster.SimpleFillNIC(nicparams)
8644
      objects.NIC.CheckParameterSyntax(check_params)
8645
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
8646

    
8647
    # disk checks/pre-build
8648
    default_vg = self.cfg.GetVGName()
8649
    self.disks = []
8650
    for disk in self.op.disks:
8651
      mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
8652
      if mode not in constants.DISK_ACCESS_SET:
8653
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
8654
                                   mode, errors.ECODE_INVAL)
8655
      size = disk.get(constants.IDISK_SIZE, None)
8656
      if size is None:
8657
        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
8658
      try:
8659
        size = int(size)
8660
      except (TypeError, ValueError):
8661
        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
8662
                                   errors.ECODE_INVAL)
8663

    
8664
      data_vg = disk.get(constants.IDISK_VG, default_vg)
8665
      new_disk = {
8666
        constants.IDISK_SIZE: size,
8667
        constants.IDISK_MODE: mode,
8668
        constants.IDISK_VG: data_vg,
8669
        constants.IDISK_METAVG: disk.get(constants.IDISK_METAVG, data_vg),
8670
        }
8671
      if constants.IDISK_ADOPT in disk:
8672
        new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
8673
      self.disks.append(new_disk)
8674

    
8675
    if self.op.mode == constants.INSTANCE_IMPORT:
8676

    
8677
      # Check that the new instance doesn't have fewer disks than the export
8678
      instance_disks = len(self.disks)
8679
      export_disks = export_info.getint(constants.INISECT_INS, "disk_count")
8680
      if instance_disks < export_disks:
8681
        raise errors.OpPrereqError("Not enough disks to import."
8682
                                   " (instance: %d, export: %d)" %
8683
                                   (instance_disks, export_disks),
8684
                                   errors.ECODE_INVAL)
8685

    
8686
      disk_images = []
8687
      for idx in range(export_disks):
8688
        option = "disk%d_dump" % idx
8689
        if export_info.has_option(constants.INISECT_INS, option):
8690
          # FIXME: are the old os-es, disk sizes, etc. useful?
8691
          export_name = export_info.get(constants.INISECT_INS, option)
8692
          image = utils.PathJoin(self.op.src_path, export_name)
8693
          disk_images.append(image)
8694
        else:
8695
          disk_images.append(False)
8696

    
8697
      self.src_images = disk_images
8698

    
8699
      old_name = export_info.get(constants.INISECT_INS, "name")
8700
      try:
8701
        exp_nic_count = export_info.getint(constants.INISECT_INS, "nic_count")
8702
      except (TypeError, ValueError), err:
8703
        raise errors.OpPrereqError("Invalid export file, nic_count is not"
8704
                                   " an integer: %s" % str(err),
8705
                                   errors.ECODE_STATE)
8706
      if self.op.instance_name == old_name:
8707
        for idx, nic in enumerate(self.nics):
8708
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
8709
            nic_mac_ini = "nic%d_mac" % idx
8710
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
8711

    
8712
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
8713

    
8714
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
8715
    if self.op.ip_check:
8716
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
8717
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
8718
                                   (self.check_ip, self.op.instance_name),
8719
                                   errors.ECODE_NOTUNIQUE)
8720

    
8721
    #### mac address generation
8722
    # By generating the MAC address here, both the allocator and the hooks get
8723
    # the real, final MAC address rather than the 'auto' or 'generate' value.
8724
    # There is a race condition between the generation and the instance object
8725
    # creation, which means that we know the MAC is valid now, but we're not
8726
    # sure it still will be when we actually add the instance. If things go
8727
    # wrong, adding the instance will abort because of a duplicate MAC, and the
8728
    # creation job will fail.
8729
    for nic in self.nics:
8730
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8731
        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
8732

    
8733
    #### allocator run
8734

    
8735
    if self.op.iallocator is not None:
8736
      self._RunAllocator()
8737

    
8738
    #### node related checks
8739

    
8740
    # check primary node
8741
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
8742
    assert self.pnode is not None, \
8743
      "Cannot retrieve locked node %s" % self.op.pnode
8744
    if pnode.offline:
8745
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
8746
                                 pnode.name, errors.ECODE_STATE)
8747
    if pnode.drained:
8748
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
8749
                                 pnode.name, errors.ECODE_STATE)
8750
    if not pnode.vm_capable:
8751
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
8752
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
8753

    
8754
    self.secondaries = []
8755

    
8756
    # mirror node verification
8757
    if self.op.disk_template in constants.DTS_INT_MIRROR:
8758
      if self.op.snode == pnode.name:
8759
        raise errors.OpPrereqError("The secondary node cannot be the"
8760
                                   " primary node", errors.ECODE_INVAL)
8761
      _CheckNodeOnline(self, self.op.snode)
8762
      _CheckNodeNotDrained(self, self.op.snode)
8763
      _CheckNodeVmCapable(self, self.op.snode)
8764
      self.secondaries.append(self.op.snode)
8765

    
8766
    nodenames = [pnode.name] + self.secondaries
8767

    
8768
    if not self.adopt_disks:
8769
      # Check lv size requirements, if not adopting
8770
      req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
8771
      _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
8772

    
8773
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
8774
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
8775
                                disk[constants.IDISK_ADOPT])
8776
                     for disk in self.disks])
8777
      if len(all_lvs) != len(self.disks):
8778
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
8779
                                   errors.ECODE_INVAL)
8780
      for lv_name in all_lvs:
8781
        try:
8782
          # FIXME: lv_name here is "vg/lv"; need to ensure that other calls
8783
          # to ReserveLV use the same syntax
8784
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
8785
        except errors.ReservationError:
8786
          raise errors.OpPrereqError("LV named %s used by another instance" %
8787
                                     lv_name, errors.ECODE_NOTUNIQUE)
8788

    
8789
      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
8790
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
8791

    
8792
      node_lvs = self.rpc.call_lv_list([pnode.name],
8793
                                       vg_names.payload.keys())[pnode.name]
8794
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
8795
      node_lvs = node_lvs.payload
8796

    
8797
      delta = all_lvs.difference(node_lvs.keys())
8798
      if delta:
8799
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
8800
                                   utils.CommaJoin(delta),
8801
                                   errors.ECODE_INVAL)
8802
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
8803
      if online_lvs:
8804
        raise errors.OpPrereqError("Online logical volumes found, cannot"
8805
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
8806
                                   errors.ECODE_STATE)
8807
      # update the size of disk based on what is found
8808
      for dsk in self.disks:
8809
        dsk[constants.IDISK_SIZE] = \
8810
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
8811
                                        dsk[constants.IDISK_ADOPT])][0]))
8812

    
8813
    elif self.op.disk_template == constants.DT_BLOCK:
8814
      # Normalize and de-duplicate device paths
8815
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
8816
                       for disk in self.disks])
8817
      if len(all_disks) != len(self.disks):
8818
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
8819
                                   errors.ECODE_INVAL)
8820
      baddisks = [d for d in all_disks
8821
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
8822
      if baddisks:
8823
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
8824
                                   " cannot be adopted" %
8825
                                   (", ".join(baddisks),
8826
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
8827
                                   errors.ECODE_INVAL)
8828

    
8829
      node_disks = self.rpc.call_bdev_sizes([pnode.name],
8830
                                            list(all_disks))[pnode.name]
8831
      node_disks.Raise("Cannot get block device information from node %s" %
8832
                       pnode.name)
8833
      node_disks = node_disks.payload
8834
      delta = all_disks.difference(node_disks.keys())
8835
      if delta:
8836
        raise errors.OpPrereqError("Missing block device(s): %s" %
8837
                                   utils.CommaJoin(delta),
8838
                                   errors.ECODE_INVAL)
8839
      for dsk in self.disks:
8840
        dsk[constants.IDISK_SIZE] = \
8841
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
8842

    
8843
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
8844

    
8845
    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
8846
    # check OS parameters (remotely)
8847
    _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
8848

    
8849
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
8850

    
8851
    # memory check on primary node
8852
    if self.op.start:
8853
      _CheckNodeFreeMemory(self, self.pnode.name,
8854
                           "creating instance %s" % self.op.instance_name,
8855
                           self.be_full[constants.BE_MEMORY],
8856
                           self.op.hypervisor)
8857

    
8858
    self.dry_run_result = list(nodenames)
8859

    
8860
  def Exec(self, feedback_fn):
8861
    """Create and add the instance to the cluster.
8862

8863
    """
8864
    instance = self.op.instance_name
8865
    pnode_name = self.pnode.name
8866

    
8867
    ht_kind = self.op.hypervisor
8868
    if ht_kind in constants.HTS_REQ_PORT:
8869
      network_port = self.cfg.AllocatePort()
8870
    else:
8871
      network_port = None
8872

    
8873
    disks = _GenerateDiskTemplate(self,
8874
                                  self.op.disk_template,
8875
                                  instance, pnode_name,
8876
                                  self.secondaries,
8877
                                  self.disks,
8878
                                  self.instance_file_storage_dir,
8879
                                  self.op.file_driver,
8880
                                  0,
8881
                                  feedback_fn)
8882

    
8883
    iobj = objects.Instance(name=instance, os=self.op.os_type,
8884
                            primary_node=pnode_name,
8885
                            nics=self.nics, disks=disks,
8886
                            disk_template=self.op.disk_template,
8887
                            admin_up=False,
8888
                            network_port=network_port,
8889
                            beparams=self.op.beparams,
8890
                            hvparams=self.op.hvparams,
8891
                            hypervisor=self.op.hypervisor,
8892
                            osparams=self.op.osparams,
8893
                            )
8894

    
8895
    if self.op.tags:
8896
      for tag in self.op.tags:
8897
        iobj.AddTag(tag)
8898

    
8899
    if self.adopt_disks:
8900
      if self.op.disk_template == constants.DT_PLAIN:
8901
        # rename LVs to the newly-generated names; we need to construct
8902
        # 'fake' LV disks with the old data, plus the new unique_id
8903
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
8904
        rename_to = []
8905
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
8906
          rename_to.append(t_dsk.logical_id)
8907
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
8908
          self.cfg.SetDiskID(t_dsk, pnode_name)
8909
        result = self.rpc.call_blockdev_rename(pnode_name,
8910
                                               zip(tmp_disks, rename_to))
8911
        result.Raise("Failed to rename adoped LVs")
8912
    else:
8913
      feedback_fn("* creating instance disks...")
8914
      try:
8915
        _CreateDisks(self, iobj)
8916
      except errors.OpExecError:
8917
        self.LogWarning("Device creation failed, reverting...")
8918
        try:
8919
          _RemoveDisks(self, iobj)
8920
        finally:
8921
          self.cfg.ReleaseDRBDMinors(instance)
8922
          raise
8923

    
8924
    feedback_fn("adding instance %s to cluster config" % instance)
8925

    
8926
    self.cfg.AddInstance(iobj, self.proc.GetECId())
8927

    
8928
    # Declare that we don't want to remove the instance lock anymore, as we've
8929
    # added the instance to the config
8930
    del self.remove_locks[locking.LEVEL_INSTANCE]
8931

    
8932
    if self.op.mode == constants.INSTANCE_IMPORT:
8933
      # Release unused nodes
8934
      _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
8935
    else:
8936
      # Release all nodes
8937
      _ReleaseLocks(self, locking.LEVEL_NODE)
8938

    
8939
    disk_abort = False
8940
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
8941
      feedback_fn("* wiping instance disks...")
8942
      try:
8943
        _WipeDisks(self, iobj)
8944
      except errors.OpExecError, err:
8945
        logging.exception("Wiping disks failed")
8946
        self.LogWarning("Wiping instance disks failed (%s)", err)
8947
        disk_abort = True
8948

    
8949
    if disk_abort:
8950
      # Something is already wrong with the disks, don't do anything else
8951
      pass
8952
    elif self.op.wait_for_sync:
8953
      disk_abort = not _WaitForSync(self, iobj)
8954
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
8955
      # make sure the disks are not degraded (still sync-ing is ok)
8956
      feedback_fn("* checking mirrors status")
8957
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
8958
    else:
8959
      disk_abort = False
8960

    
8961
    if disk_abort:
8962
      _RemoveDisks(self, iobj)
8963
      self.cfg.RemoveInstance(iobj.name)
8964
      # Make sure the instance lock gets removed
8965
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
8966
      raise errors.OpExecError("There are some degraded disks for"
8967
                               " this instance")
8968

    
8969
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
8970
      if self.op.mode == constants.INSTANCE_CREATE:
8971
        if not self.op.no_install:
8972
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
8973
                        not self.op.wait_for_sync)
8974
          if pause_sync:
8975
            feedback_fn("* pausing disk sync to install instance OS")
8976
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
8977
                                                              iobj.disks, True)
8978
            for idx, success in enumerate(result.payload):
8979
              if not success:
8980
                logging.warn("pause-sync of instance %s for disk %d failed",
8981
                             instance, idx)
8982

    
8983
          feedback_fn("* running the instance OS create scripts...")
8984
          # FIXME: pass debug option from opcode to backend
8985
          os_add_result = \
8986
            self.rpc.call_instance_os_add(pnode_name, iobj, False,
8987
                                          self.op.debug_level)
8988
          if pause_sync:
8989
            feedback_fn("* resuming disk sync")
8990
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
8991
                                                              iobj.disks, False)
8992
            for idx, success in enumerate(result.payload):
8993
              if not success:
8994
                logging.warn("resume-sync of instance %s for disk %d failed",
8995
                             instance, idx)
8996

    
8997
          os_add_result.Raise("Could not add os for instance %s"
8998
                              " on node %s" % (instance, pnode_name))
8999

    
9000
      elif self.op.mode == constants.INSTANCE_IMPORT:
9001
        feedback_fn("* running the instance OS import scripts...")
9002

    
9003
        transfers = []
9004

    
9005
        for idx, image in enumerate(self.src_images):
9006
          if not image:
9007
            continue
9008

    
9009
          # FIXME: pass debug option from opcode to backend
9010
          dt = masterd.instance.DiskTransfer("disk/%s" % idx,
9011
                                             constants.IEIO_FILE, (image, ),
9012
                                             constants.IEIO_SCRIPT,
9013
                                             (iobj.disks[idx], idx),
9014
                                             None)
9015
          transfers.append(dt)
9016

    
9017
        import_result = \
9018
          masterd.instance.TransferInstanceData(self, feedback_fn,
9019
                                                self.op.src_node, pnode_name,
9020
                                                self.pnode.secondary_ip,
9021
                                                iobj, transfers)
9022
        if not compat.all(import_result):
9023
          self.LogWarning("Some disks for instance %s on node %s were not"
9024
                          " imported successfully" % (instance, pnode_name))
9025

    
9026
      elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
9027
        feedback_fn("* preparing remote import...")
9028
        # The source cluster will stop the instance before attempting to make a
9029
        # connection. In some cases stopping an instance can take a long time,
9030
        # hence the shutdown timeout is added to the connection timeout.
9031
        connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
9032
                           self.op.source_shutdown_timeout)
9033
        timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
9034

    
9035
        assert iobj.primary_node == self.pnode.name
9036
        disk_results = \
9037
          masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
9038
                                        self.source_x509_ca,
9039
                                        self._cds, timeouts)
9040
        if not compat.all(disk_results):
9041
          # TODO: Should the instance still be started, even if some disks
9042
          # failed to import (valid for local imports, too)?
9043
          self.LogWarning("Some disks for instance %s on node %s were not"
9044
                          " imported successfully" % (instance, pnode_name))
9045

    
9046
        # Run rename script on newly imported instance
9047
        assert iobj.name == instance
9048
        feedback_fn("Running rename script for %s" % instance)
9049
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
9050
                                                   self.source_instance_name,
9051
                                                   self.op.debug_level)
9052
        if result.fail_msg:
9053
          self.LogWarning("Failed to run rename script for %s on node"
9054
                          " %s: %s" % (instance, pnode_name, result.fail_msg))
9055

    
9056
      else:
9057
        # also checked in the prereq part
9058
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
9059
                                     % self.op.mode)
9060

    
9061
    if self.op.start:
9062
      iobj.admin_up = True
9063
      self.cfg.Update(iobj, feedback_fn)
9064
      logging.info("Starting instance %s on node %s", instance, pnode_name)
9065
      feedback_fn("* starting instance...")
9066
      result = self.rpc.call_instance_start(pnode_name, iobj,
9067
                                            None, None, False)
9068
      result.Raise("Could not start instance")
9069

    
9070
    return list(iobj.all_nodes)
9071

    
9072

    
9073
class LUInstanceConsole(NoHooksLU):
9074
  """Connect to an instance's console.
9075

9076
  This is somewhat special in that it returns the command line that
9077
  you need to run on the master node in order to connect to the
9078
  console.
9079

9080
  """
9081
  REQ_BGL = False
9082

    
9083
  def ExpandNames(self):
9084
    self._ExpandAndLockInstance()
9085

    
9086
  def CheckPrereq(self):
9087
    """Check prerequisites.
9088

9089
    This checks that the instance is in the cluster.
9090

9091
    """
9092
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
9093
    assert self.instance is not None, \
9094
      "Cannot retrieve locked instance %s" % self.op.instance_name
9095
    _CheckNodeOnline(self, self.instance.primary_node)
9096

    
9097
  def Exec(self, feedback_fn):
9098
    """Connect to the console of an instance
9099

9100
    """
9101
    instance = self.instance
9102
    node = instance.primary_node
9103

    
9104
    node_insts = self.rpc.call_instance_list([node],
9105
                                             [instance.hypervisor])[node]
9106
    node_insts.Raise("Can't get node information from %s" % node)
9107

    
9108
    if instance.name not in node_insts.payload:
9109
      if instance.admin_up:
9110
        state = constants.INSTST_ERRORDOWN
9111
      else:
9112
        state = constants.INSTST_ADMINDOWN
9113
      raise errors.OpExecError("Instance %s is not running (state %s)" %
9114
                               (instance.name, state))
9115

    
9116
    logging.debug("Connecting to console of %s on %s", instance.name, node)
9117

    
9118
    return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
9119

    
9120

    
9121
def _GetInstanceConsole(cluster, instance):
9122
  """Returns console information for an instance.
9123

9124
  @type cluster: L{objects.Cluster}
9125
  @type instance: L{objects.Instance}
9126
  @rtype: dict
9127

9128
  """
9129
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
9130
  # beparams and hvparams are passed separately, to avoid editing the
9131
  # instance and then saving the defaults in the instance itself.
9132
  hvparams = cluster.FillHV(instance)
9133
  beparams = cluster.FillBE(instance)
9134
  console = hyper.GetInstanceConsole(instance, hvparams, beparams)
9135

    
9136
  assert console.instance == instance.name
9137
  assert console.Validate()
9138

    
9139
  return console.ToDict()
9140

    
9141

    
9142
class LUInstanceReplaceDisks(LogicalUnit):
9143
  """Replace the disks of an instance.
9144

9145
  """
9146
  HPATH = "mirrors-replace"
9147
  HTYPE = constants.HTYPE_INSTANCE
9148
  REQ_BGL = False
9149

    
9150
  def CheckArguments(self):
9151
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
9152
                                  self.op.iallocator)
9153

    
9154
  def ExpandNames(self):
9155
    self._ExpandAndLockInstance()
9156

    
9157
    assert locking.LEVEL_NODE not in self.needed_locks
9158
    assert locking.LEVEL_NODEGROUP not in self.needed_locks
9159

    
9160
    assert self.op.iallocator is None or self.op.remote_node is None, \
9161
      "Conflicting options"
9162

    
9163
    if self.op.remote_node is not None:
9164
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
9165

    
9166
      # Warning: do not remove the locking of the new secondary here
9167
      # unless DRBD8.AddChildren is changed to work in parallel;
9168
      # currently it doesn't since parallel invocations of
9169
      # FindUnusedMinor will conflict
9170
      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
9171
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
9172
    else:
9173
      self.needed_locks[locking.LEVEL_NODE] = []
9174
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
9175

    
9176
      if self.op.iallocator is not None:
9177
        # iallocator will select a new node in the same group
9178
        self.needed_locks[locking.LEVEL_NODEGROUP] = []
9179

    
9180
    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
9181
                                   self.op.iallocator, self.op.remote_node,
9182
                                   self.op.disks, False, self.op.early_release)
9183

    
9184
    self.tasklets = [self.replacer]
9185

    
9186
  def DeclareLocks(self, level):
9187
    if level == locking.LEVEL_NODEGROUP:
9188
      assert self.op.remote_node is None
9189
      assert self.op.iallocator is not None
9190
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
9191

    
9192
      self.share_locks[locking.LEVEL_NODEGROUP] = 1
9193
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
9194
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
9195

    
9196
    elif level == locking.LEVEL_NODE:
9197
      if self.op.iallocator is not None:
9198
        assert self.op.remote_node is None
9199
        assert not self.needed_locks[locking.LEVEL_NODE]
9200

    
9201
        # Lock member nodes of all locked groups
9202
        self.needed_locks[locking.LEVEL_NODE] = [node_name
9203
          for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
9204
          for node_name in self.cfg.GetNodeGroup(group_uuid).members]
9205
      else:
9206
        self._LockInstancesNodes()
9207

    
9208
  def BuildHooksEnv(self):
9209
    """Build hooks env.
9210

9211
    This runs on the master, the primary and all the secondaries.
9212

9213
    """
9214
    instance = self.replacer.instance
9215
    env = {
9216
      "MODE": self.op.mode,
9217
      "NEW_SECONDARY": self.op.remote_node,
9218
      "OLD_SECONDARY": instance.secondary_nodes[0],
9219
      }
9220
    env.update(_BuildInstanceHookEnvByObject(self, instance))
9221
    return env
9222

    
9223
  def BuildHooksNodes(self):
9224
    """Build hooks nodes.
9225

9226
    """
9227
    instance = self.replacer.instance
9228
    nl = [
9229
      self.cfg.GetMasterNode(),
9230
      instance.primary_node,
9231
      ]
9232
    if self.op.remote_node is not None:
9233
      nl.append(self.op.remote_node)
9234
    return nl, nl
9235

    
9236
  def CheckPrereq(self):
9237
    """Check prerequisites.
9238

9239
    """
9240
    assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
9241
            self.op.iallocator is None)
9242

    
9243
    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
9244
    if owned_groups:
9245
      _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
9246

    
9247
    return LogicalUnit.CheckPrereq(self)
9248

    
9249

    
9250
class TLReplaceDisks(Tasklet):
9251
  """Replaces disks for an instance.
9252

9253
  Note: Locking is not within the scope of this class.
9254

9255
  """
9256
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
9257
               disks, delay_iallocator, early_release):
9258
    """Initializes this class.
9259

9260
    """
9261
    Tasklet.__init__(self, lu)
9262

    
9263
    # Parameters
9264
    self.instance_name = instance_name
9265
    self.mode = mode
9266
    self.iallocator_name = iallocator_name
9267
    self.remote_node = remote_node
9268
    self.disks = disks
9269
    self.delay_iallocator = delay_iallocator
9270
    self.early_release = early_release
9271

    
9272
    # Runtime data
9273
    self.instance = None
9274
    self.new_node = None
9275
    self.target_node = None
9276
    self.other_node = None
9277
    self.remote_node_info = None
9278
    self.node_secondary_ip = None
9279

    
9280
  @staticmethod
9281
  def CheckArguments(mode, remote_node, iallocator):
9282
    """Helper function for users of this class.
9283

9284
    """
9285
    # check for valid parameter combination
9286
    if mode == constants.REPLACE_DISK_CHG:
9287
      if remote_node is None and iallocator is None:
9288
        raise errors.OpPrereqError("When changing the secondary either an"
9289
                                   " iallocator script must be used or the"
9290
                                   " new node given", errors.ECODE_INVAL)
9291

    
9292
      if remote_node is not None and iallocator is not None:
9293
        raise errors.OpPrereqError("Give either the iallocator or the new"
9294
                                   " secondary, not both", errors.ECODE_INVAL)
9295

    
9296
    elif remote_node is not None or iallocator is not None:
9297
      # Not replacing the secondary
9298
      raise errors.OpPrereqError("The iallocator and new node options can"
9299
                                 " only be used when changing the"
9300
                                 " secondary node", errors.ECODE_INVAL)
9301

    
9302
  @staticmethod
9303
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
9304
    """Compute a new secondary node using an IAllocator.
9305

9306
    """
9307
    ial = IAllocator(lu.cfg, lu.rpc,
9308
                     mode=constants.IALLOCATOR_MODE_RELOC,
9309
                     name=instance_name,
9310
                     relocate_from=list(relocate_from))
9311

    
9312
    ial.Run(iallocator_name)
9313

    
9314
    if not ial.success:
9315
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
9316
                                 " %s" % (iallocator_name, ial.info),
9317
                                 errors.ECODE_NORES)
9318

    
9319
    if len(ial.result) != ial.required_nodes:
9320
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
9321
                                 " of nodes (%s), required %s" %
9322
                                 (iallocator_name,
9323
                                  len(ial.result), ial.required_nodes),
9324
                                 errors.ECODE_FAULT)
9325

    
9326
    remote_node_name = ial.result[0]
9327

    
9328
    lu.LogInfo("Selected new secondary for instance '%s': %s",
9329
               instance_name, remote_node_name)
9330

    
9331
    return remote_node_name
9332

    
9333
  def _FindFaultyDisks(self, node_name):
9334
    """Wrapper for L{_FindFaultyInstanceDisks}.
9335

9336
    """
9337
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
9338
                                    node_name, True)
9339

    
9340
  def _CheckDisksActivated(self, instance):
9341
    """Checks if the instance disks are activated.
9342

9343
    @param instance: The instance to check disks
9344
    @return: True if they are activated, False otherwise
9345

9346
    """
9347
    nodes = instance.all_nodes
9348

    
9349
    for idx, dev in enumerate(instance.disks):
9350
      for node in nodes:
9351
        self.lu.LogInfo("Checking disk/%d on %s", idx, node)
9352
        self.cfg.SetDiskID(dev, node)
9353

    
9354
        result = self.rpc.call_blockdev_find(node, dev)
9355

    
9356
        if result.offline:
9357
          continue
9358
        elif result.fail_msg or not result.payload:
9359
          return False
9360

    
9361
    return True
9362

    
9363
  def CheckPrereq(self):
9364
    """Check prerequisites.
9365

9366
    This checks that the instance is in the cluster.
9367

9368
    """
9369
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
9370
    assert instance is not None, \
9371
      "Cannot retrieve locked instance %s" % self.instance_name
9372

    
9373
    if instance.disk_template != constants.DT_DRBD8:
9374
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
9375
                                 " instances", errors.ECODE_INVAL)
9376

    
9377
    if len(instance.secondary_nodes) != 1:
9378
      raise errors.OpPrereqError("The instance has a strange layout,"
9379
                                 " expected one secondary but found %d" %
9380
                                 len(instance.secondary_nodes),
9381
                                 errors.ECODE_FAULT)
9382

    
9383
    if not self.delay_iallocator:
9384
      self._CheckPrereq2()
9385

    
9386
  def _CheckPrereq2(self):
9387
    """Check prerequisites, second part.
9388

9389
    This function should always be part of CheckPrereq. It was separated and is
9390
    now called from Exec because during node evacuation iallocator was only
9391
    called with an unmodified cluster model, not taking planned changes into
9392
    account.
9393

9394
    """
9395
    instance = self.instance
9396
    secondary_node = instance.secondary_nodes[0]
9397

    
9398
    if self.iallocator_name is None:
9399
      remote_node = self.remote_node
9400
    else:
9401
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
9402
                                       instance.name, instance.secondary_nodes)
9403

    
9404
    if remote_node is None:
9405
      self.remote_node_info = None
9406
    else:
9407
      assert remote_node in self.lu.owned_locks(locking.LEVEL_NODE), \
9408
             "Remote node '%s' is not locked" % remote_node
9409

    
9410
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
9411
      assert self.remote_node_info is not None, \
9412
        "Cannot retrieve locked node %s" % remote_node
9413

    
9414
    if remote_node == self.instance.primary_node:
9415
      raise errors.OpPrereqError("The specified node is the primary node of"
9416
                                 " the instance", errors.ECODE_INVAL)
9417

    
9418
    if remote_node == secondary_node:
9419
      raise errors.OpPrereqError("The specified node is already the"
9420
                                 " secondary node of the instance",
9421
                                 errors.ECODE_INVAL)
9422

    
9423
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
9424
                                    constants.REPLACE_DISK_CHG):
9425
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
9426
                                 errors.ECODE_INVAL)
9427

    
9428
    if self.mode == constants.REPLACE_DISK_AUTO:
9429
      if not self._CheckDisksActivated(instance):
9430
        raise errors.OpPrereqError("Please run activate-disks on instance %s"
9431
                                   " first" % self.instance_name,
9432
                                   errors.ECODE_STATE)
9433
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
9434
      faulty_secondary = self._FindFaultyDisks(secondary_node)
9435

    
9436
      if faulty_primary and faulty_secondary:
9437
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
9438
                                   " one node and can not be repaired"
9439
                                   " automatically" % self.instance_name,
9440
                                   errors.ECODE_STATE)
9441

    
9442
      if faulty_primary:
9443
        self.disks = faulty_primary
9444
        self.target_node = instance.primary_node
9445
        self.other_node = secondary_node
9446
        check_nodes = [self.target_node, self.other_node]
9447
      elif faulty_secondary:
9448
        self.disks = faulty_secondary
9449
        self.target_node = secondary_node
9450
        self.other_node = instance.primary_node
9451
        check_nodes = [self.target_node, self.other_node]
9452
      else:
9453
        self.disks = []
9454
        check_nodes = []
9455

    
9456
    else:
9457
      # Non-automatic modes
9458
      if self.mode == constants.REPLACE_DISK_PRI:
9459
        self.target_node = instance.primary_node
9460
        self.other_node = secondary_node
9461
        check_nodes = [self.target_node, self.other_node]
9462

    
9463
      elif self.mode == constants.REPLACE_DISK_SEC:
9464
        self.target_node = secondary_node
9465
        self.other_node = instance.primary_node
9466
        check_nodes = [self.target_node, self.other_node]
9467

    
9468
      elif self.mode == constants.REPLACE_DISK_CHG:
9469
        self.new_node = remote_node
9470
        self.other_node = instance.primary_node
9471
        self.target_node = secondary_node
9472
        check_nodes = [self.new_node, self.other_node]
9473

    
9474
        _CheckNodeNotDrained(self.lu, remote_node)
9475
        _CheckNodeVmCapable(self.lu, remote_node)
9476

    
9477
        old_node_info = self.cfg.GetNodeInfo(secondary_node)
9478
        assert old_node_info is not None
9479
        if old_node_info.offline and not self.early_release:
9480
          # doesn't make sense to delay the release
9481
          self.early_release = True
9482
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
9483
                          " early-release mode", secondary_node)
9484

    
9485
      else:
9486
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
9487
                                     self.mode)
9488

    
9489
      # If not specified all disks should be replaced
9490
      if not self.disks:
9491
        self.disks = range(len(self.instance.disks))
9492

    
9493
    for node in check_nodes:
9494
      _CheckNodeOnline(self.lu, node)
9495

    
9496
    touched_nodes = frozenset(node_name for node_name in [self.new_node,
9497
                                                          self.other_node,
9498
                                                          self.target_node]
9499
                              if node_name is not None)
9500

    
9501
    # Release unneeded node locks
9502
    _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
9503

    
9504
    # Release any owned node group
9505
    if self.lu.glm.is_owned(locking.LEVEL_NODEGROUP):
9506
      _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
9507

    
9508
    # Check whether disks are valid
9509
    for disk_idx in self.disks:
9510
      instance.FindDisk(disk_idx)
9511

    
9512
    # Get secondary node IP addresses
9513
    self.node_secondary_ip = dict((name, node.secondary_ip) for (name, node)
9514
                                  in self.cfg.GetMultiNodeInfo(touched_nodes))
9515

    
9516
  def Exec(self, feedback_fn):
9517
    """Execute disk replacement.
9518

9519
    This dispatches the disk replacement to the appropriate handler.
9520

9521
    """
9522
    if self.delay_iallocator:
9523
      self._CheckPrereq2()
9524

    
9525
    if __debug__:
9526
      # Verify owned locks before starting operation
9527
      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
9528
      assert set(owned_nodes) == set(self.node_secondary_ip), \
9529
          ("Incorrect node locks, owning %s, expected %s" %
9530
           (owned_nodes, self.node_secondary_ip.keys()))
9531

    
9532
      owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
9533
      assert list(owned_instances) == [self.instance_name], \
9534
          "Instance '%s' not locked" % self.instance_name
9535

    
9536
      assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
9537
          "Should not own any node group lock at this point"
9538

    
9539
    if not self.disks:
9540
      feedback_fn("No disks need replacement")
9541
      return
9542

    
9543
    feedback_fn("Replacing disk(s) %s for %s" %
9544
                (utils.CommaJoin(self.disks), self.instance.name))
9545

    
9546
    activate_disks = (not self.instance.admin_up)
9547

    
9548
    # Activate the instance disks if we're replacing them on a down instance
9549
    if activate_disks:
9550
      _StartInstanceDisks(self.lu, self.instance, True)
9551

    
9552
    try:
9553
      # Should we replace the secondary node?
9554
      if self.new_node is not None:
9555
        fn = self._ExecDrbd8Secondary
9556
      else:
9557
        fn = self._ExecDrbd8DiskOnly
9558

    
9559
      result = fn(feedback_fn)
9560
    finally:
9561
      # Deactivate the instance disks if we're replacing them on a
9562
      # down instance
9563
      if activate_disks:
9564
        _SafeShutdownInstanceDisks(self.lu, self.instance)
9565

    
9566
    if __debug__:
9567
      # Verify owned locks
9568
      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
9569
      nodes = frozenset(self.node_secondary_ip)
9570
      assert ((self.early_release and not owned_nodes) or
9571
              (not self.early_release and not (set(owned_nodes) - nodes))), \
9572
        ("Not owning the correct locks, early_release=%s, owned=%r,"
9573
         " nodes=%r" % (self.early_release, owned_nodes, nodes))
9574

    
9575
    return result
9576

    
9577
  def _CheckVolumeGroup(self, nodes):
    self.lu.LogInfo("Checking volume groups")

    vgname = self.cfg.GetVGName()

    # Make sure volume group exists on all involved nodes
    results = self.rpc.call_vg_list(nodes)
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")

    for node in nodes:
      res = results[node]
      res.Raise("Error checking node %s" % node)
      if vgname not in res.payload:
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
                                 (vgname, node))

  def _CheckDisksExistence(self, nodes):
    # Check disk existence
    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      for node in nodes:
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
        self.cfg.SetDiskID(dev, node)

        result = self.rpc.call_blockdev_find(node, dev)

        msg = result.fail_msg
        if msg or not result.payload:
          if not msg:
            msg = "disk not found"
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                   (idx, node, msg))

  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
                      (idx, node_name))

      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
                                   ldisk=ldisk):
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
                                 " replace disks for instance %s" %
                                 (node_name, self.instance.name))

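  # Clarifying note (added): _CheckDisksConsistency above delegates to
  # _CheckDiskConsistency; with ldisk=True the check is made against the
  # local-disk status of the DRBD device rather than its overall is_degraded
  # state, which is why the secondary-replacement path calls it with
  # ldisk=True on the primary node.
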
  def _CreateNewStorage(self, node_name):
    """Create new storage on the primary or secondary node.

    This is only used for same-node replaces, not for changing the
    secondary node, hence we don't want to modify the existing disk.

    """
    iv_names = {}

    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))

      self.cfg.SetDiskID(dev, node_name)

      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
      names = _GenerateUniqueNames(self.lu, lv_names)

      vg_data = dev.children[0].logical_id[0]
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
                             logical_id=(vg_data, names[0]))
      vg_meta = dev.children[1].logical_id[0]
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vg_meta, names[1]))

      new_lvs = [lv_data, lv_meta]
      old_lvs = [child.Copy() for child in dev.children]
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)

      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
                        _GetInstanceInfoText(self.instance), False)

    return iv_names

  def _CheckDevices(self, node_name, iv_names):
    for name, (dev, _, _) in iv_names.iteritems():
      self.cfg.SetDiskID(dev, node_name)

      result = self.rpc.call_blockdev_find(node_name, dev)

      msg = result.fail_msg
      if msg or not result.payload:
        if not msg:
          msg = "disk not found"
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
                                 (name, msg))

      if result.payload.is_degraded:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

  def _RemoveOldStorage(self, node_name, iv_names):
    for name, (_, old_lvs, _) in iv_names.iteritems():
      self.lu.LogInfo("Remove logical volumes for %s" % name)

      for lv in old_lvs:
        self.cfg.SetDiskID(lv, node_name)

        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
        if msg:
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
                             hint="remove unused LVs manually")

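  # Shape of the iv_names mapping used above and below (descriptive comment,
  # added): _CreateNewStorage builds {iv_name: (drbd_dev, old_lvs, new_lvs)},
  # e.g. {"disk/0": (...)}; _ExecDrbd8Secondary instead builds
  # {idx: (drbd_dev, old_children, new_net_id)}.  _CheckDevices only uses the
  # first element of each value and _RemoveOldStorage only the second, so
  # both layouts work with these helpers.
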
  def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
    """Replace a disk on the primary or secondary for DRBD 8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaced.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.other_node, self.target_node])
    self._CheckVolumeGroup([self.target_node, self.other_node])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(self.other_node,
                                self.other_node == self.instance.primary_node,
                                False)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    iv_names = self._CreateNewStorage(self.target_node)

    # Step: for each lv, detach+rename*2+attach
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)

      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
                                                     old_lvs)
      result.Raise("Can't detach drbd from local storage on node"
                   " %s for device %s" % (self.target_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)

      # Build the rename list based on what LVs exist on the node
      rename_old_to_new = []
      for to_ren in old_lvs:
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
        if not result.fail_msg and result.payload:
          # device exists
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))

      self.lu.LogInfo("Renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_old_to_new)
      result.Raise("Can't rename old LVs on node %s" % self.target_node)

      # Now we rename the new LVs to the old LVs
      self.lu.LogInfo("Renaming the new LVs on the target node")
      rename_new_to_old = [(new, old.physical_id)
                           for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_new_to_old)
      result.Raise("Can't rename new LVs on node %s" % self.target_node)

      # Intermediate steps of in memory modifications
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        self.cfg.SetDiskID(new, self.target_node)

      # We need to modify old_lvs so that removal later removes the
      # right LVs, not the newly added ones; note that old_lvs is a
      # copy here
      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        self.cfg.SetDiskID(disk, self.target_node)

      # Now that the new lvs have the old name, we can add them to the device
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
                                                  new_lvs)
      msg = result.fail_msg
      if msg:
        for new_lv in new_lvs:
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
                                               new_lv).fail_msg
          if msg2:
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
                               hint=("cleanup manually the unused logical"
                                     " volumes"))
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)

    cstep = 5
    if self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)
      # WARNING: we release both node locks here, do not do other RPCs
      # than WaitForSync to the primary node
      _ReleaseLocks(self.lu, locking.LEVEL_NODE,
                    names=[self.target_node, self.other_node])

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep, steps_total, "Sync devices")
    cstep += 1
    _WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)

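  # Worked example of the rename dance above (hypothetical names, added):
  # with temp_suffix=1300000000, an existing LV ("xenvg", "<uuid>.disk0_data")
  # is first renamed to ("xenvg", "<uuid>.disk0_data_replaced-1300000000"),
  # then the freshly created LV is renamed to the original name, so the DRBD
  # device re-attaches to a same-named but brand-new backing volume.
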
  def _ExecDrbd8Secondary(self, feedback_fn):
    """Replace the secondary node for DRBD 8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6

    pnode = self.instance.primary_node

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.instance.primary_node])
    self._CheckVolumeGroup([self.instance.primary_node])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(self.instance.primary_node, True, True)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
                      (self.new_node, idx))
      # we pass force_create=True to force LVM creation
      for new_lv in dev.children:
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
                        _GetInstanceInfoText(self.instance), False)

    # Step 4: drbd minors and drbd setup changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    minors = self.cfg.AllocateDRBDMinor([self.new_node
                                         for dev in self.instance.disks],
                                        self.instance.name)
    logging.debug("Allocated minors %r", minors)

    iv_names = {}
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
                      (self.new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the latter activation in step 4
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if self.instance.primary_node == o_node1:
        p_minor = o_minor1
      else:
        assert self.instance.primary_node == o_node2, "Three-node instance?"
        p_minor = o_minor2

      new_alone_id = (self.instance.primary_node, self.new_node, None,
                      p_minor, new_minor, o_secret)
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
                    p_minor, new_minor, o_secret)

      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children,
                              size=dev.size)
      try:
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
                              _GetInstanceInfoText(self.instance), False)
      except errors.GenericError:
        self.cfg.ReleaseDRBDMinors(self.instance.name)
        raise

    # We have new devices, shutdown the drbd on the old secondary
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
      self.cfg.SetDiskID(dev, self.target_node)
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
      if msg:
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
                           " node: %s" % (idx, msg),
                           hint=("Please cleanup this device manually as"
                                 " soon as possible"))

    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([pnode], self.node_secondary_ip,
                                               self.instance.disks)[pnode]

    msg = result.fail_msg
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(self.instance.name)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    self.lu.LogInfo("Updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      self.cfg.SetDiskID(dev, self.instance.primary_node)

    self.cfg.Update(self.instance, feedback_fn)

    # and now perform the drbd attach
    self.lu.LogInfo("Attaching primary drbds to new secondary"
                    " (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                            self.new_node],
                                           self.node_secondary_ip,
                                           self.instance.disks,
                                           self.instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
                           to_node, msg,
                           hint=("please do a gnt-instance info to see the"
                                 " status of disks"))
    cstep = 5
    if self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)
      # WARNING: we release all node locks here, do not do other RPCs
      # than WaitForSync to the primary node
      _ReleaseLocks(self.lu, locking.LEVEL_NODE,
                    names=[self.instance.primary_node,
                           self.target_node,
                           self.new_node])

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep, steps_total, "Sync devices")
    cstep += 1
    _WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      self._RemoveOldStorage(self.target_node, iv_names)


class LURepairNodeStorage(NoHooksLU):
  """Repairs the volume group on a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type

    if (constants.SO_FIX_CONSISTENCY not in
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " repaired" % storage_type,
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

  def _CheckFaultyDisks(self, instance, node_name):
    """Ensure faulty disks abort the opcode or at least warn."""
    try:
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
                                  node_name, True):
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
                                   " node '%s'" % (instance.name, node_name),
                                   errors.ECODE_STATE)
    except errors.OpPrereqError, err:
      if self.op.ignore_consistency:
        self.proc.LogWarning(str(err.args[0]))
      else:
        raise

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # Check whether any instance on this node has faulty disks
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
      if not inst.admin_up:
        continue
      check_nodes = set(inst.all_nodes)
      check_nodes.discard(self.op.node_name)
      for inst_node_name in check_nodes:
        self._CheckFaultyDisks(inst, inst_node_name)

  def Exec(self, feedback_fn):
    feedback_fn("Repairing storage unit '%s' on %s ..." %
                (self.op.name, self.op.node_name))

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_execute(self.op.node_name,
                                           self.op.storage_type, st_args,
                                           self.op.name,
                                           constants.SO_FIX_CONSISTENCY)
    result.Raise("Failed to repair storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))


class LUNodeEvacuate(NoHooksLU):
  """Evacuates instances off a list of nodes.

  """
  REQ_BGL = False

  def CheckArguments(self):
    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")

  def ExpandNames(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    if self.op.remote_node is not None:
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
      assert self.op.remote_node

      if self.op.remote_node == self.op.node_name:
        raise errors.OpPrereqError("Can not use evacuated node as a new"
                                   " secondary node", errors.ECODE_INVAL)

      if self.op.mode != constants.IALLOCATOR_NEVAC_SEC:
        raise errors.OpPrereqError("Without the use of an iallocator only"
                                   " secondary instances can be evacuated",
                                   errors.ECODE_INVAL)

    # Declare locks
    self.share_locks = _ShareAll()
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

    if self.op.remote_node is None:
      # Iallocator will choose any node(s) in the same group
      group_nodes = self.cfg.GetNodeGroupMembersByNodes([self.op.node_name])
    else:
      group_nodes = frozenset([self.op.remote_node])

    # Determine nodes to be locked
    self.lock_nodes = set([self.op.node_name]) | group_nodes

  def _DetermineInstances(self):
    """Builds list of instances to operate on.

    """
    assert self.op.mode in constants.IALLOCATOR_NEVAC_MODES

    if self.op.mode == constants.IALLOCATOR_NEVAC_PRI:
      # Primary instances only
      inst_fn = _GetNodePrimaryInstances
      assert self.op.remote_node is None, \
        "Evacuating primary instances requires iallocator"
    elif self.op.mode == constants.IALLOCATOR_NEVAC_SEC:
      # Secondary instances only
      inst_fn = _GetNodeSecondaryInstances
    else:
      # All instances
      assert self.op.mode == constants.IALLOCATOR_NEVAC_ALL
      inst_fn = _GetNodeInstances

    return inst_fn(self.cfg, self.op.node_name)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        set(i.name for i in self._DetermineInstances())

    elif level == locking.LEVEL_NODEGROUP:
      # Lock node groups optimistically, needs verification once nodes have
      # been acquired
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetNodeGroupsFromNodes(self.lock_nodes)

    elif level == locking.LEVEL_NODE:
      self.needed_locks[locking.LEVEL_NODE] = self.lock_nodes

  def CheckPrereq(self):
    # Verify locks
    owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
    owned_nodes = self.owned_locks(locking.LEVEL_NODE)
    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)

    assert owned_nodes == self.lock_nodes

    wanted_groups = self.cfg.GetNodeGroupsFromNodes(owned_nodes)
    if owned_groups != wanted_groups:
      raise errors.OpExecError("Node groups changed since locks were acquired,"
                               " current groups are '%s', used to be '%s'" %
                               (utils.CommaJoin(wanted_groups),
                                utils.CommaJoin(owned_groups)))

    # Determine affected instances
    self.instances = self._DetermineInstances()
    self.instance_names = [i.name for i in self.instances]

    if set(self.instance_names) != owned_instances:
      raise errors.OpExecError("Instances on node '%s' changed since locks"
                               " were acquired, current instances are '%s',"
                               " used to be '%s'" %
                               (self.op.node_name,
                                utils.CommaJoin(self.instance_names),
                                utils.CommaJoin(owned_instances)))

    if self.instance_names:
      self.LogInfo("Evacuating instances from node '%s': %s",
                   self.op.node_name,
                   utils.CommaJoin(utils.NiceSort(self.instance_names)))
    else:
      self.LogInfo("No instances to evacuate from node '%s'",
                   self.op.node_name)

    if self.op.remote_node is not None:
      for i in self.instances:
        if i.primary_node == self.op.remote_node:
          raise errors.OpPrereqError("Node %s is the primary node of"
                                     " instance %s, cannot use it as"
                                     " secondary" %
                                     (self.op.remote_node, i.name),
                                     errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    assert (self.op.iallocator is not None) ^ (self.op.remote_node is not None)

    if not self.instance_names:
      # No instances to evacuate
      jobs = []

    elif self.op.iallocator is not None:
      # TODO: Implement relocation to other group
      ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_NODE_EVAC,
                       evac_mode=self.op.mode,
                       instances=list(self.instance_names))

      ial.Run(self.op.iallocator)

      if not ial.success:
        raise errors.OpPrereqError("Can't compute node evacuation using"
                                   " iallocator '%s': %s" %
                                   (self.op.iallocator, ial.info),
                                   errors.ECODE_NORES)

      jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, True)

    elif self.op.remote_node is not None:
      assert self.op.mode == constants.IALLOCATOR_NEVAC_SEC
      jobs = [
        [opcodes.OpInstanceReplaceDisks(instance_name=instance_name,
                                        remote_node=self.op.remote_node,
                                        disks=[],
                                        mode=constants.REPLACE_DISK_CHG,
                                        early_release=self.op.early_release)]
        for instance_name in self.instance_names
        ]

    else:
      raise errors.ProgrammerError("No iallocator or remote node")

    return ResultWithJobs(jobs)


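# Example of the value handed to ResultWithJobs above (illustrative names,
# added): with remote_node set and two affected instances, "jobs" looks like
#   [[OpInstanceReplaceDisks(instance_name="inst1", ...)],
#    [OpInstanceReplaceDisks(instance_name="inst2", ...)]]
# i.e. one single-opcode job per instance, which the master daemon then
# submits as separate jobs.
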
def _SetOpEarlyRelease(early_release, op):
  """Sets C{early_release} flag on opcodes if available.

  """
  try:
    op.early_release = early_release
  except AttributeError:
    assert not isinstance(op, opcodes.OpInstanceReplaceDisks)

  return op


def _NodeEvacDest(use_nodes, group, nodes):
  """Returns group or nodes depending on caller's choice.

  """
  if use_nodes:
    return utils.CommaJoin(nodes)
  else:
    return group


def _LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
  """Unpacks the result of change-group and node-evacuate iallocator requests.

  Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
  L{constants.IALLOCATOR_MODE_CHG_GROUP}.

  @type lu: L{LogicalUnit}
  @param lu: Logical unit instance
  @type alloc_result: tuple/list
  @param alloc_result: Result from iallocator
  @type early_release: bool
  @param early_release: Whether to release locks early if possible
  @type use_nodes: bool
  @param use_nodes: Whether to display node names instead of groups

  """
  (moved, failed, jobs) = alloc_result

  if failed:
    lu.LogWarning("Unable to evacuate instances %s",
                  utils.CommaJoin("%s (%s)" % (name, reason)
                                  for (name, reason) in failed))

  if moved:
    lu.LogInfo("Instances to be moved: %s",
               utils.CommaJoin("%s (to %s)" %
                               (name, _NodeEvacDest(use_nodes, group, nodes))
                               for (name, group, nodes) in moved))

  return [map(compat.partial(_SetOpEarlyRelease, early_release),
              map(opcodes.OpCode.LoadOpCode, ops))
          for ops in jobs]


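# Sketch of the iallocator result unpacked above (assumed layout matching the
# (moved, failed, jobs) tuple; names are illustrative, added):
#   moved  = [("inst1", "group1", ["node3"])]
#   failed = [("inst2", "some reason")]
#   jobs   = [[{"OP_ID": "OP_INSTANCE_REPLACE_DISKS", ...}]]
# Each entry in "jobs" is a list of serialized opcodes which
# opcodes.OpCode.LoadOpCode turns back into opcode objects.
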
class LUInstanceGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)

    self.instance = instance

    if instance.disk_template not in constants.DTS_GROWABLE:
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing", errors.ECODE_INVAL)

    self.disk = instance.FindDisk(self.op.disk)

    if instance.disk_template not in (constants.DT_FILE,
                                      constants.DT_SHARED_FILE):
      # TODO: check the free disk space for file, when that feature will be
      # supported
      _CheckNodesFreeDiskPerVG(self, nodenames,
                               self.disk.ComputeGrowth(self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = self.disk

    disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block device to grow")

    # First run all grow ops in dry-run mode
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount, True)
      result.Raise("Grow request failed to node %s" % node)

    # We know that (as far as we can test) operations across different
    # nodes will succeed, time to run it for real
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount, False)
      result.Raise("Grow request failed to node %s" % node)

      # TODO: Rewrite code to work properly
      # DRBD goes into sync mode for a short amount of time after executing the
      # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
      # calling "resize" in sync mode fails. Sleeping for a short amount of
      # time is a work-around.
      time.sleep(5)

    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance, feedback_fn)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance, disks=[disk])
      if disk_abort:
        self.proc.LogWarning("Disk sync-ing has not returned a good"
                             " status; please check the instance")
      if not instance.admin_up:
        _SafeShutdownInstanceDisks(self, instance, disks=[disk])
    elif not instance.admin_up:
      self.proc.LogWarning("Not shutting down the disk even if the instance is"
                           " not supposed to be running because no wait for"
                           " sync mode was requested")


class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      self.wanted_names = _GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = _ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if self.op.use_locking and level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)

    self.wanted_instances = \
        map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))

  def _ComputeBlockdevStatus(self, node, instance_name, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node:
      return None

    self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_find(node, dev)
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance_name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    """
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance.name, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatus,
                                        instance, snode),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    pri_nodes = self.cfg.GetMultiNodeInfo(i.primary_node
                                          for i in self.wanted_instances)
    for instance, (_, pnode) in zip(self.wanted_instances, pri_nodes):
      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise("Error checking node %s" % instance.primary_node)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"

      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
                  instance.disks)

      result[instance.name] = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": _NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result


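# Shape of the per-disk status values above (descriptive comment, added):
# both "pstatus" and "sstatus" are either None or the 7-tuple returned by
# _ComputeBlockdevStatus, i.e.
#   (dev_path, major, minor, sync_percent, estimated_time, is_degraded,
#    ldisk_status)
# with sync_percent/estimated_time only meaningful while the device is
# resynchronizing.
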
class LUInstanceSetParams(LogicalUnit):
10550
  """Modifies an instances's parameters.
10551

10552
  """
10553
  HPATH = "instance-modify"
10554
  HTYPE = constants.HTYPE_INSTANCE
10555
  REQ_BGL = False
10556

    
10557
  def CheckArguments(self):
10558
    if not (self.op.nics or self.op.disks or self.op.disk_template or
10559
            self.op.hvparams or self.op.beparams or self.op.os_name):
10560
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
10561

    
10562
    if self.op.hvparams:
10563
      _CheckGlobalHvParams(self.op.hvparams)
10564

    
10565
    # Disk validation
10566
    disk_addremove = 0
10567
    for disk_op, disk_dict in self.op.disks:
10568
      utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
10569
      if disk_op == constants.DDM_REMOVE:
10570
        disk_addremove += 1
10571
        continue
10572
      elif disk_op == constants.DDM_ADD:
10573
        disk_addremove += 1
10574
      else:
10575
        if not isinstance(disk_op, int):
10576
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
10577
        if not isinstance(disk_dict, dict):
10578
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
10579
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
10580

    
10581
      if disk_op == constants.DDM_ADD:
10582
        mode = disk_dict.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
10583
        if mode not in constants.DISK_ACCESS_SET:
10584
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
10585
                                     errors.ECODE_INVAL)
10586
        size = disk_dict.get(constants.IDISK_SIZE, None)
10587
        if size is None:
10588
          raise errors.OpPrereqError("Required disk parameter size missing",
10589
                                     errors.ECODE_INVAL)
10590
        try:
10591
          size = int(size)
10592
        except (TypeError, ValueError), err:
10593
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
10594
                                     str(err), errors.ECODE_INVAL)
10595
        disk_dict[constants.IDISK_SIZE] = size
10596
      else:
10597
        # modification of disk
10598
        if constants.IDISK_SIZE in disk_dict:
10599
          raise errors.OpPrereqError("Disk size change not possible, use"
10600
                                     " grow-disk", errors.ECODE_INVAL)
10601

    
10602
    if disk_addremove > 1:
10603
      raise errors.OpPrereqError("Only one disk add or remove operation"
10604
                                 " supported at a time", errors.ECODE_INVAL)
10605

    
10606
    if self.op.disks and self.op.disk_template is not None:
10607
      raise errors.OpPrereqError("Disk template conversion and other disk"
10608
                                 " changes not supported at the same time",
10609
                                 errors.ECODE_INVAL)
10610

    
10611
    if (self.op.disk_template and
10612
        self.op.disk_template in constants.DTS_INT_MIRROR and
10613
        self.op.remote_node is None):
10614
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
10615
                                 " one requires specifying a secondary node",
10616
                                 errors.ECODE_INVAL)
10617

    
10618
    # NIC validation
10619
    nic_addremove = 0
10620
    for nic_op, nic_dict in self.op.nics:
10621
      utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
10622
      if nic_op == constants.DDM_REMOVE:
10623
        nic_addremove += 1
10624
        continue
10625
      elif nic_op == constants.DDM_ADD:
10626
        nic_addremove += 1
10627
      else:
10628
        if not isinstance(nic_op, int):
10629
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
10630
        if not isinstance(nic_dict, dict):
10631
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
10632
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
10633

    
10634
      # nic_dict should be a dict
10635
      nic_ip = nic_dict.get(constants.INIC_IP, None)
10636
      if nic_ip is not None:
10637
        if nic_ip.lower() == constants.VALUE_NONE:
10638
          nic_dict[constants.INIC_IP] = None
10639
        else:
10640
          if not netutils.IPAddress.IsValid(nic_ip):
10641
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
10642
                                       errors.ECODE_INVAL)
10643

    
10644
      nic_bridge = nic_dict.get("bridge", None)
10645
      nic_link = nic_dict.get(constants.INIC_LINK, None)
10646
      if nic_bridge and nic_link:
10647
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
10648
                                   " at the same time", errors.ECODE_INVAL)
10649
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
10650
        nic_dict["bridge"] = None
10651
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
10652
        nic_dict[constants.INIC_LINK] = None
10653

    
10654
      if nic_op == constants.DDM_ADD:
10655
        nic_mac = nic_dict.get(constants.INIC_MAC, None)
10656
        if nic_mac is None:
10657
          nic_dict[constants.INIC_MAC] = constants.VALUE_AUTO
10658

    
10659
      if constants.INIC_MAC in nic_dict:
10660
        nic_mac = nic_dict[constants.INIC_MAC]
10661
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
10662
          nic_mac = utils.NormalizeAndValidateMac(nic_mac)
10663

    
10664
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
10665
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
10666
                                     " modifying an existing nic",
10667
                                     errors.ECODE_INVAL)
10668

    
10669
    if nic_addremove > 1:
10670
      raise errors.OpPrereqError("Only one NIC add or remove operation"
10671
                                 " supported at a time", errors.ECODE_INVAL)
10672

    
10673
  def ExpandNames(self):
10674
    self._ExpandAndLockInstance()
10675
    self.needed_locks[locking.LEVEL_NODE] = []
10676
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
10677

    
10678
  def DeclareLocks(self, level):
10679
    if level == locking.LEVEL_NODE:
10680
      self._LockInstancesNodes()
10681
      if self.op.disk_template and self.op.remote_node:
10682
        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
10683
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
10684

    
10685
  def BuildHooksEnv(self):
10686
    """Build hooks env.
10687

10688
    This runs on the master, primary and secondaries.
10689

10690
    """
10691
    args = dict()
10692
    if constants.BE_MEMORY in self.be_new:
10693
      args["memory"] = self.be_new[constants.BE_MEMORY]
10694
    if constants.BE_VCPUS in self.be_new:
10695
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
10696
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
10697
    # information at all.
10698
    if self.op.nics:
10699
      args["nics"] = []
10700
      nic_override = dict(self.op.nics)
10701
      for idx, nic in enumerate(self.instance.nics):
10702
        if idx in nic_override:
10703
          this_nic_override = nic_override[idx]
10704
        else:
10705
          this_nic_override = {}
10706
        if constants.INIC_IP in this_nic_override:
10707
          ip = this_nic_override[constants.INIC_IP]
10708
        else:
10709
          ip = nic.ip
10710
        if constants.INIC_MAC in this_nic_override:
10711
          mac = this_nic_override[constants.INIC_MAC]
10712
        else:
10713
          mac = nic.mac
10714
        if idx in self.nic_pnew:
10715
          nicparams = self.nic_pnew[idx]
10716
        else:
10717
          nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
10718
        mode = nicparams[constants.NIC_MODE]
10719
        link = nicparams[constants.NIC_LINK]
10720
        args["nics"].append((ip, mac, mode, link))
10721
      if constants.DDM_ADD in nic_override:
10722
        ip = nic_override[constants.DDM_ADD].get(constants.INIC_IP, None)
10723
        mac = nic_override[constants.DDM_ADD][constants.INIC_MAC]
10724
        nicparams = self.nic_pnew[constants.DDM_ADD]
10725
        mode = nicparams[constants.NIC_MODE]
10726
        link = nicparams[constants.NIC_LINK]
10727
        args["nics"].append((ip, mac, mode, link))
10728
      elif constants.DDM_REMOVE in nic_override:
10729
        del args["nics"][-1]
10730

    
10731
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
10732
    if self.op.disk_template:
10733
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
10734

    
10735
    return env
10736

    
10737
  def BuildHooksNodes(self):
10738
    """Build hooks nodes.
10739

10740
    """
10741
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
10742
    return (nl, nl)
10743

    
10744
  def CheckPrereq(self):
10745
    """Check prerequisites.
10746

10747
    This only checks the instance list against the existing names.
10748

10749
    """
10750
    # checking the new params on the primary/secondary nodes
10751

    
10752
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
10753
    cluster = self.cluster = self.cfg.GetClusterInfo()
10754
    assert self.instance is not None, \
10755
      "Cannot retrieve locked instance %s" % self.op.instance_name
10756
    pnode = instance.primary_node
10757
    nodelist = list(instance.all_nodes)
10758

    
10759
    # OS change
10760
    if self.op.os_name and not self.op.force:
10761
      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
10762
                      self.op.force_variant)
10763
      instance_os = self.op.os_name
10764
    else:
10765
      instance_os = instance.os
10766

    
10767
    if self.op.disk_template:
10768
      if instance.disk_template == self.op.disk_template:
10769
        raise errors.OpPrereqError("Instance already has disk template %s" %
10770
                                   instance.disk_template, errors.ECODE_INVAL)
10771

    
10772
      if (instance.disk_template,
10773
          self.op.disk_template) not in self._DISK_CONVERSIONS:
10774
        raise errors.OpPrereqError("Unsupported disk template conversion from"
10775
                                   " %s to %s" % (instance.disk_template,
10776
                                                  self.op.disk_template),
10777
                                   errors.ECODE_INVAL)
10778
      _CheckInstanceDown(self, instance, "cannot change disk template")
10779
      if self.op.disk_template in constants.DTS_INT_MIRROR:
10780
        if self.op.remote_node == pnode:
10781
          raise errors.OpPrereqError("Given new secondary node %s is the same"
10782
                                     " as the primary node of the instance" %
10783
                                     self.op.remote_node, errors.ECODE_STATE)
10784
        _CheckNodeOnline(self, self.op.remote_node)
10785
        _CheckNodeNotDrained(self, self.op.remote_node)
10786
        # FIXME: here we assume that the old instance type is DT_PLAIN
10787
        assert instance.disk_template == constants.DT_PLAIN
10788
        disks = [{constants.IDISK_SIZE: d.size,
10789
                  constants.IDISK_VG: d.logical_id[0]}
10790
                 for d in instance.disks]
10791
        required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
10792
        _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
10793

    
10794
    # hvparams processing
10795
    if self.op.hvparams:
10796
      hv_type = instance.hypervisor
10797
      i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
10798
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
10799
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
10800

    
10801
      # local check
10802
      hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
10803
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
10804
      self.hv_new = hv_new # the new actual values
10805
      self.hv_inst = i_hvdict # the new dict (without defaults)
10806
    else:
10807
      self.hv_new = self.hv_inst = {}
10808

    
10809
    # beparams processing
10810
    if self.op.beparams:
10811
      i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
10812
                                   use_none=True)
10813
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
10814
      be_new = cluster.SimpleFillBE(i_bedict)
10815
      self.be_new = be_new # the new actual values
10816
      self.be_inst = i_bedict # the new dict (without defaults)
10817
    else:
10818
      self.be_new = self.be_inst = {}
10819
    be_old = cluster.FillBE(instance)
10820

    
10821
    # osparams processing
10822
    if self.op.osparams:
10823
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
10824
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
10825
      self.os_inst = i_osdict # the new dict (without defaults)
10826
    else:
10827
      self.os_inst = {}
10828

    
10829
    self.warn = []
10830

    
10831
    if (constants.BE_MEMORY in self.op.beparams and not self.op.force and
10832
        be_new[constants.BE_MEMORY] > be_old[constants.BE_MEMORY]):
10833
      mem_check_list = [pnode]
10834
      if be_new[constants.BE_AUTO_BALANCE]:
10835
        # either we changed auto_balance to yes or it was from before
10836
        mem_check_list.extend(instance.secondary_nodes)
10837
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
10838
                                                  instance.hypervisor)
10839
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
10840
                                         instance.hypervisor)
10841
      pninfo = nodeinfo[pnode]
10842
      msg = pninfo.fail_msg
10843
      if msg:
10844
        # Assume the primary node is unreachable and go ahead
10845
        self.warn.append("Can't get info from primary node %s: %s" %
10846
                         (pnode, msg))
10847
      elif not isinstance(pninfo.payload.get("memory_free", None), int):
10848
        self.warn.append("Node data from primary node %s doesn't contain"
10849
                         " free memory information" % pnode)
10850
      elif instance_info.fail_msg:
10851
        self.warn.append("Can't get instance runtime information: %s" %
10852
                        instance_info.fail_msg)
10853
      else:
10854
        if instance_info.payload:
10855
          current_mem = int(instance_info.payload["memory"])
10856
        else:
10857
          # Assume instance not running
10858
          # (there is a slight race condition here, but it's not very probable,
10859
          # and we have no other way to check)
10860
          current_mem = 0
10861
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
10862
                    pninfo.payload["memory_free"])
10863
        if miss_mem > 0:
10864
          raise errors.OpPrereqError("This change will prevent the instance"
10865
                                     " from starting, due to %d MB of memory"
10866
                                     " missing on its primary node" % miss_mem,
10867
                                     errors.ECODE_NORES)
10868
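      # Worked example with made-up numbers: raising BE_MEMORY to 4096 MiB
      # while the instance currently runs with 2048 MiB and the primary node
      # reports 1024 MiB free gives miss_mem = 4096 - 2048 - 1024 = 1024 > 0,
      # so the change is refused; with 3072 MiB free it would be accepted.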

    
10869
      if be_new[constants.BE_AUTO_BALANCE]:
10870
        for node, nres in nodeinfo.items():
10871
          if node not in instance.secondary_nodes:
10872
            continue
10873
          nres.Raise("Can't get info from secondary node %s" % node,
10874
                     prereq=True, ecode=errors.ECODE_STATE)
10875
          if not isinstance(nres.payload.get("memory_free", None), int):
10876
            raise errors.OpPrereqError("Secondary node %s didn't return free"
10877
                                       " memory information" % node,
10878
                                       errors.ECODE_STATE)
10879
          elif be_new[constants.BE_MEMORY] > nres.payload["memory_free"]:
10880
            raise errors.OpPrereqError("This change will prevent the instance"
10881
                                       " from failover to its secondary node"
10882
                                       " %s, due to not enough memory" % node,
10883
                                       errors.ECODE_STATE)
10884

    
10885
    # NIC processing
10886
    self.nic_pnew = {}
10887
    self.nic_pinst = {}
10888
    for nic_op, nic_dict in self.op.nics:
10889
      if nic_op == constants.DDM_REMOVE:
10890
        if not instance.nics:
10891
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
10892
                                     errors.ECODE_INVAL)
10893
        continue
10894
      if nic_op != constants.DDM_ADD:
10895
        # an existing nic
10896
        if not instance.nics:
10897
          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
10898
                                     " no NICs" % nic_op,
10899
                                     errors.ECODE_INVAL)
10900
        if nic_op < 0 or nic_op >= len(instance.nics):
10901
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
10902
                                     " are 0 to %d" %
10903
                                     (nic_op, len(instance.nics) - 1),
10904
                                     errors.ECODE_INVAL)
10905
        old_nic_params = instance.nics[nic_op].nicparams
10906
        old_nic_ip = instance.nics[nic_op].ip
10907
      else:
10908
        old_nic_params = {}
10909
        old_nic_ip = None
10910

    
10911
      update_params_dict = dict([(key, nic_dict[key])
10912
                                 for key in constants.NICS_PARAMETERS
10913
                                 if key in nic_dict])
10914

    
10915
      if "bridge" in nic_dict:
10916
        update_params_dict[constants.NIC_LINK] = nic_dict["bridge"]
10917

    
10918
      new_nic_params = _GetUpdatedParams(old_nic_params,
10919
                                         update_params_dict)
10920
      utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
10921
      new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
10922
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
10923
      self.nic_pinst[nic_op] = new_nic_params
10924
      self.nic_pnew[nic_op] = new_filled_nic_params
10925
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
10926

    
10927
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
10928
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
10929
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
10930
        if msg:
10931
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
10932
          if self.op.force:
10933
            self.warn.append(msg)
10934
          else:
10935
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
10936
      if new_nic_mode == constants.NIC_MODE_ROUTED:
10937
        if constants.INIC_IP in nic_dict:
10938
          nic_ip = nic_dict[constants.INIC_IP]
10939
        else:
10940
          nic_ip = old_nic_ip
10941
        if nic_ip is None:
10942
          raise errors.OpPrereqError("Cannot set the nic ip to None"
10943
                                     " on a routed nic", errors.ECODE_INVAL)
10944
      if constants.INIC_MAC in nic_dict:
10945
        nic_mac = nic_dict[constants.INIC_MAC]
10946
        if nic_mac is None:
10947
          raise errors.OpPrereqError("Cannot set the nic mac to None",
10948
                                     errors.ECODE_INVAL)
10949
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
10950
          # otherwise generate the mac
10951
          nic_dict[constants.INIC_MAC] = \
10952
            self.cfg.GenerateMAC(self.proc.GetECId())
10953
        else:
10954
          # or validate/reserve the current one
10955
          try:
10956
            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
10957
          except errors.ReservationError:
10958
            raise errors.OpPrereqError("MAC address %s already in use"
10959
                                       " in cluster" % nic_mac,
10960
                                       errors.ECODE_NOTUNIQUE)
10961
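    # The per-NIC modifications arrive as (operation, parameters) pairs; a
    # hypothetical self.op.nics covering the three cases handled above:
    #   [(constants.DDM_ADD, {constants.INIC_MAC: constants.VALUE_AUTO}),
    #    (0, {"bridge": "br100"}),        # modify NIC 0, legacy "bridge" key
    #    (constants.DDM_REMOVE, {})]      # drop the last NIC
    # The legacy "bridge" key is mapped to constants.NIC_LINK before the
    # parameters are validated and filled.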

    
10962
    # DISK processing
10963
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
10964
      raise errors.OpPrereqError("Disk operations not supported for"
10965
                                 " diskless instances",
10966
                                 errors.ECODE_INVAL)
10967
    for disk_op, _ in self.op.disks:
10968
      if disk_op == constants.DDM_REMOVE:
10969
        if len(instance.disks) == 1:
10970
          raise errors.OpPrereqError("Cannot remove the last disk of"
10971
                                     " an instance", errors.ECODE_INVAL)
10972
        _CheckInstanceDown(self, instance, "cannot remove disks")
10973

    
10974
      if (disk_op == constants.DDM_ADD and
10975
          len(instance.disks) >= constants.MAX_DISKS):
10976
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
10977
                                   " add more" % constants.MAX_DISKS,
10978
                                   errors.ECODE_STATE)
10979
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
10980
        # an existing disk
10981
        if disk_op < 0 or disk_op >= len(instance.disks):
10982
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
10983
                                     " are 0 to %d" %
10984
                                     (disk_op, len(instance.disks) - 1),
10985
                                     errors.ECODE_INVAL)
10986

    
10987
    return
10988

    
10989
  def _ConvertPlainToDrbd(self, feedback_fn):
10990
    """Converts an instance from plain to drbd.
10991

10992
    """
10993
    feedback_fn("Converting template to drbd")
10994
    instance = self.instance
10995
    pnode = instance.primary_node
10996
    snode = self.op.remote_node
10997

    
10998
    # create a fake disk info for _GenerateDiskTemplate
10999
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
11000
                  constants.IDISK_VG: d.logical_id[0]}
11001
                 for d in instance.disks]
11002
    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
11003
                                      instance.name, pnode, [snode],
11004
                                      disk_info, None, None, 0, feedback_fn)
11005
    info = _GetInstanceInfoText(instance)
11006
    feedback_fn("Creating aditional volumes...")
11007
    # first, create the missing data and meta devices
11008
    for disk in new_disks:
11009
      # unfortunately this is... not too nice
11010
      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
11011
                            info, True)
11012
      for child in disk.children:
11013
        _CreateSingleBlockDev(self, snode, instance, child, info, True)
11014
    # at this stage, all new LVs have been created, we can rename the
11015
    # old ones
11016
    feedback_fn("Renaming original volumes...")
11017
    rename_list = [(o, n.children[0].logical_id)
11018
                   for (o, n) in zip(instance.disks, new_disks)]
11019
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
11020
    result.Raise("Failed to rename original LVs")
11021

    
11022
    feedback_fn("Initializing DRBD devices...")
11023
    # all child devices are in place, we can now create the DRBD devices
11024
    for disk in new_disks:
11025
      for node in [pnode, snode]:
11026
        f_create = node == pnode
11027
        _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
11028

    
11029
    # at this point, the instance has been modified
11030
    instance.disk_template = constants.DT_DRBD8
11031
    instance.disks = new_disks
11032
    self.cfg.Update(instance, feedback_fn)
11033

    
11034
    # disks are created, waiting for sync
11035
    disk_abort = not _WaitForSync(self, instance,
11036
                                  oneshot=not self.op.wait_for_sync)
11037
    if disk_abort:
11038
      raise errors.OpExecError("There are some degraded disks for"
11039
                               " this instance, please cleanup manually")
11040
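  # Shape of each converted disk, sketched (the LV roles below follow the code
  # above; actual names are whatever _GenerateDiskTemplate picks):
  #   DRBD8 device
  #     children[0]: data LV - the renamed original plain LV
  #     children[1]: meta LV - created from scratch
  # The data/meta pair has to exist on both the primary and the new secondary
  # node, which is why _CreateSingleBlockDev runs per node before the DRBD
  # devices themselves are assembled and the sync is awaited.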

    
11041
  def _ConvertDrbdToPlain(self, feedback_fn):
11042
    """Converts an instance from drbd to plain.
11043

11044
    """
11045
    instance = self.instance
11046
    assert len(instance.secondary_nodes) == 1
11047
    pnode = instance.primary_node
11048
    snode = instance.secondary_nodes[0]
11049
    feedback_fn("Converting template to plain")
11050

    
11051
    old_disks = instance.disks
11052
    new_disks = [d.children[0] for d in old_disks]
11053

    
11054
    # copy over size and mode
11055
    for parent, child in zip(old_disks, new_disks):
11056
      child.size = parent.size
11057
      child.mode = parent.mode
11058

    
11059
    # update instance structure
11060
    instance.disks = new_disks
11061
    instance.disk_template = constants.DT_PLAIN
11062
    self.cfg.Update(instance, feedback_fn)
11063

    
11064
    feedback_fn("Removing volumes on the secondary node...")
11065
    for disk in old_disks:
11066
      self.cfg.SetDiskID(disk, snode)
11067
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
11068
      if msg:
11069
        self.LogWarning("Could not remove block device %s on node %s,"
11070
                        " continuing anyway: %s", disk.iv_name, snode, msg)
11071

    
11072
    feedback_fn("Removing unneeded volumes on the primary node...")
11073
    for idx, disk in enumerate(old_disks):
11074
      meta = disk.children[1]
11075
      self.cfg.SetDiskID(meta, pnode)
11076
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
11077
      if msg:
11078
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
11079
                        " continuing anyway: %s", idx, pnode, msg)
11080
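  # The reverse conversion keeps only children[0] (the data LV) of every DRBD
  # disk as the new plain disk; everything else (the secondary node's copy and
  # the metadata LV on the primary) is removed above on a best-effort basis,
  # with failures reported only as warnings.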

    
11081
  def Exec(self, feedback_fn):
11082
    """Modifies an instance.
11083

11084
    All parameters take effect only at the next restart of the instance.
11085

11086
    """
11087
    # Process here the warnings from CheckPrereq, as we don't have a
11088
    # feedback_fn there.
11089
    for warn in self.warn:
11090
      feedback_fn("WARNING: %s" % warn)
11091

    
11092
    result = []
11093
    instance = self.instance
11094
    # disk changes
11095
    for disk_op, disk_dict in self.op.disks:
11096
      if disk_op == constants.DDM_REMOVE:
11097
        # remove the last disk
11098
        device = instance.disks.pop()
11099
        device_idx = len(instance.disks)
11100
        for node, disk in device.ComputeNodeTree(instance.primary_node):
11101
          self.cfg.SetDiskID(disk, node)
11102
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
11103
          if msg:
11104
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
11105
                            " continuing anyway", device_idx, node, msg)
11106
        result.append(("disk/%d" % device_idx, "remove"))
11107
      elif disk_op == constants.DDM_ADD:
11108
        # add a new disk
11109
        if instance.disk_template in (constants.DT_FILE,
11110
                                        constants.DT_SHARED_FILE):
11111
          file_driver, file_path = instance.disks[0].logical_id
11112
          file_path = os.path.dirname(file_path)
11113
        else:
11114
          file_driver = file_path = None
11115
        disk_idx_base = len(instance.disks)
11116
        new_disk = _GenerateDiskTemplate(self,
11117
                                         instance.disk_template,
11118
                                         instance.name, instance.primary_node,
11119
                                         instance.secondary_nodes,
11120
                                         [disk_dict],
11121
                                         file_path,
11122
                                         file_driver,
11123
                                         disk_idx_base, feedback_fn)[0]
11124
        instance.disks.append(new_disk)
11125
        info = _GetInstanceInfoText(instance)
11126

    
11127
        logging.info("Creating volume %s for instance %s",
11128
                     new_disk.iv_name, instance.name)
11129
        # Note: this needs to be kept in sync with _CreateDisks
11130
        #HARDCODE
11131
        for node in instance.all_nodes:
11132
          f_create = node == instance.primary_node
11133
          try:
11134
            _CreateBlockDev(self, node, instance, new_disk,
11135
                            f_create, info, f_create)
11136
          except errors.OpExecError, err:
11137
            self.LogWarning("Failed to create volume %s (%s) on"
11138
                            " node %s: %s",
11139
                            new_disk.iv_name, new_disk, node, err)
11140
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
11141
                       (new_disk.size, new_disk.mode)))
11142
      else:
11143
        # change a given disk
11144
        instance.disks[disk_op].mode = disk_dict[constants.IDISK_MODE]
11145
        result.append(("disk.mode/%d" % disk_op,
11146
                       disk_dict[constants.IDISK_MODE]))
11147

    
11148
    if self.op.disk_template:
11149
      r_shut = _ShutdownInstanceDisks(self, instance)
11150
      if not r_shut:
11151
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
11152
                                 " proceed with disk template conversion")
11153
      mode = (instance.disk_template, self.op.disk_template)
11154
      try:
11155
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
11156
      except:
11157
        self.cfg.ReleaseDRBDMinors(instance.name)
11158
        raise
11159
      result.append(("disk_template", self.op.disk_template))
11160

    
11161
    # NIC changes
11162
    for nic_op, nic_dict in self.op.nics:
11163
      if nic_op == constants.DDM_REMOVE:
11164
        # remove the last nic
11165
        del instance.nics[-1]
11166
        result.append(("nic.%d" % len(instance.nics), "remove"))
11167
      elif nic_op == constants.DDM_ADD:
11168
        # mac and bridge should be set, by now
11169
        mac = nic_dict[constants.INIC_MAC]
11170
        ip = nic_dict.get(constants.INIC_IP, None)
11171
        nicparams = self.nic_pinst[constants.DDM_ADD]
11172
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
11173
        instance.nics.append(new_nic)
11174
        result.append(("nic.%d" % (len(instance.nics) - 1),
11175
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
11176
                       (new_nic.mac, new_nic.ip,
11177
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
11178
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
11179
                       )))
11180
      else:
11181
        for key in (constants.INIC_MAC, constants.INIC_IP):
11182
          if key in nic_dict:
11183
            setattr(instance.nics[nic_op], key, nic_dict[key])
11184
        if nic_op in self.nic_pinst:
11185
          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
11186
        for key, val in nic_dict.iteritems():
11187
          result.append(("nic.%s/%d" % (key, nic_op), val))
11188

    
11189
    # hvparams changes
11190
    if self.op.hvparams:
11191
      instance.hvparams = self.hv_inst
11192
      for key, val in self.op.hvparams.iteritems():
11193
        result.append(("hv/%s" % key, val))
11194

    
11195
    # beparams changes
11196
    if self.op.beparams:
11197
      instance.beparams = self.be_inst
11198
      for key, val in self.op.beparams.iteritems():
11199
        result.append(("be/%s" % key, val))
11200

    
11201
    # OS change
11202
    if self.op.os_name:
11203
      instance.os = self.op.os_name
11204

    
11205
    # osparams changes
11206
    if self.op.osparams:
11207
      instance.osparams = self.os_inst
11208
      for key, val in self.op.osparams.iteritems():
11209
        result.append(("os/%s" % key, val))
11210

    
11211
    self.cfg.Update(instance, feedback_fn)
11212

    
11213
    return result
11214
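  # For reference, Exec() returns a list of (parameter, new value) pairs that
  # the client presents to the user; a hypothetical run adding a disk and
  # raising the memory limit might yield something like
  #   [("disk/1", "add:size=10240,mode=rw"), ("be/memory", 4096)]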

    
11215
  _DISK_CONVERSIONS = {
11216
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
11217
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
11218
    }
11219
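  # Dispatch sketch: Exec() looks up the (old template, new template) pair in
  # this mapping and calls the stored function explicitly with self, e.g.
  #   mode = (constants.DT_PLAIN, constants.DT_DRBD8)
  #   self._DISK_CONVERSIONS[mode](self, feedback_fn)
  # Pairs without an entry were already rejected earlier, in CheckPrereq.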

    
11220

    
11221
class LUInstanceChangeGroup(LogicalUnit):
11222
  HPATH = "instance-change-group"
11223
  HTYPE = constants.HTYPE_INSTANCE
11224
  REQ_BGL = False
11225

    
11226
  def ExpandNames(self):
11227
    self.share_locks = _ShareAll()
11228
    self.needed_locks = {
11229
      locking.LEVEL_NODEGROUP: [],
11230
      locking.LEVEL_NODE: [],
11231
      }
11232

    
11233
    self._ExpandAndLockInstance()
11234

    
11235
    if self.op.target_groups:
11236
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
11237
                                  self.op.target_groups)
11238
    else:
11239
      self.req_target_uuids = None
11240

    
11241
    self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
11242

    
11243
  def DeclareLocks(self, level):
11244
    if level == locking.LEVEL_NODEGROUP:
11245
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
11246

    
11247
      if self.req_target_uuids:
11248
        lock_groups = set(self.req_target_uuids)
11249

    
11250
        # Lock all groups used by instance optimistically; this requires going
11251
        # via the node before it's locked, requiring verification later on
11252
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
11253
        lock_groups.update(instance_groups)
11254
      else:
11255
        # No target groups, need to lock all of them
11256
        lock_groups = locking.ALL_SET
11257

    
11258
      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
11259

    
11260
    elif level == locking.LEVEL_NODE:
11261
      if self.req_target_uuids:
11262
        # Lock all nodes used by instances
11263
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
11264
        self._LockInstancesNodes()
11265

    
11266
        # Lock all nodes in all potential target groups
11267
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
11268
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
11269
        member_nodes = [node_name
11270
                        for group in lock_groups
11271
                        for node_name in self.cfg.GetNodeGroup(group).members]
11272
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
11273
      else:
11274
        # Lock all nodes as all groups are potential targets
11275
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
11276

    
11277
  def CheckPrereq(self):
11278
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
11279
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
11280
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
11281

    
11282
    assert (self.req_target_uuids is None or
11283
            owned_groups.issuperset(self.req_target_uuids))
11284
    assert owned_instances == set([self.op.instance_name])
11285

    
11286
    # Get instance information
11287
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
11288

    
11289
    # Check if node groups for locked instance are still correct
11290
    assert owned_nodes.issuperset(self.instance.all_nodes), \
11291
      ("Instance %s's nodes changed while we kept the lock" %
11292
       self.op.instance_name)
11293

    
11294
    inst_groups = _CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
11295
                                           owned_groups)
11296

    
11297
    if self.req_target_uuids:
11298
      # User requested specific target groups
11299
      self.target_uuids = self.req_target_uuids
11300
    else:
11301
      # All groups except those used by the instance are potential targets
11302
      self.target_uuids = owned_groups - inst_groups
11303

    
11304
    conflicting_groups = self.target_uuids & inst_groups
11305
    if conflicting_groups:
11306
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
11307
                                 " used by the instance '%s'" %
11308
                                 (utils.CommaJoin(conflicting_groups),
11309
                                  self.op.instance_name),
11310
                                 errors.ECODE_INVAL)
11311

    
11312
    if not self.target_uuids:
11313
      raise errors.OpPrereqError("There are no possible target groups",
11314
                                 errors.ECODE_INVAL)
11315

    
11316
  def BuildHooksEnv(self):
11317
    """Build hooks env.
11318

11319
    """
11320
    assert self.target_uuids
11321

    
11322
    env = {
11323
      "TARGET_GROUPS": " ".join(self.target_uuids),
11324
      }
11325

    
11326
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
11327

    
11328
    return env
11329

    
11330
  def BuildHooksNodes(self):
11331
    """Build hooks nodes.
11332

11333
    """
11334
    mn = self.cfg.GetMasterNode()
11335
    return ([mn], [mn])
11336

    
11337
  def Exec(self, feedback_fn):
11338
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
11339

    
11340
    assert instances == [self.op.instance_name], "Instance not locked"
11341

    
11342
    ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
11343
                     instances=instances, target_groups=list(self.target_uuids))
11344

    
11345
    ial.Run(self.op.iallocator)
11346

    
11347
    if not ial.success:
11348
      raise errors.OpPrereqError("Can't compute solution for changing group of"
11349
                                 " instance '%s' using iallocator '%s': %s" %
11350
                                 (self.op.instance_name, self.op.iallocator,
11351
                                  ial.info),
11352
                                 errors.ECODE_NORES)
11353

    
11354
    jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
11355

    
11356
    self.LogInfo("Iallocator returned %s job(s) for changing group of"
11357
                 " instance '%s'", len(jobs), self.op.instance_name)
11358

    
11359
    return ResultWithJobs(jobs)
11360
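  # "jobs" as returned by _LoadNodeEvacResult is a list of opcode lists, one
  # inner list per job to be submitted; a hypothetical result moving the
  # instance could contain, for example, a single job with one migration
  # opcode:
  #   [[opcodes.OpInstanceMigrate(instance_name="inst1.example.com")]]
  # The number of inner lists is what the LogInfo call above reports.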

    
11361

    
11362
class LUBackupQuery(NoHooksLU):
11363
  """Query the exports list
11364

11365
  """
11366
  REQ_BGL = False
11367

    
11368
  def ExpandNames(self):
11369
    self.needed_locks = {}
11370
    self.share_locks[locking.LEVEL_NODE] = 1
11371
    if not self.op.nodes:
11372
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
11373
    else:
11374
      self.needed_locks[locking.LEVEL_NODE] = \
11375
        _GetWantedNodes(self, self.op.nodes)
11376

    
11377
  def Exec(self, feedback_fn):
11378
    """Compute the list of all the exported system images.
11379

11380
    @rtype: dict
11381
    @return: a dictionary with the structure node->(export-list)
11382
        where export-list is a list of the instances exported on
11383
        that node.
11384

11385
    """
11386
    self.nodes = self.owned_locks(locking.LEVEL_NODE)
11387
    rpcresult = self.rpc.call_export_list(self.nodes)
11388
    result = {}
11389
    for node in rpcresult:
11390
      if rpcresult[node].fail_msg:
11391
        result[node] = False
11392
      else:
11393
        result[node] = rpcresult[node].payload
11394

    
11395
    return result
11396
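  # Shape of the dictionary returned above, with invented names:
  #   {"node1.example.com": ["web1.example.com", "db1.example.com"],
  #    "node2.example.com": False}   # False: node failed to reply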

    
11397

    
11398
class LUBackupPrepare(NoHooksLU):
11399
  """Prepares an instance for an export and returns useful information.
11400

11401
  """
11402
  REQ_BGL = False
11403

    
11404
  def ExpandNames(self):
11405
    self._ExpandAndLockInstance()
11406

    
11407
  def CheckPrereq(self):
11408
    """Check prerequisites.
11409

11410
    """
11411
    instance_name = self.op.instance_name
11412

    
11413
    self.instance = self.cfg.GetInstanceInfo(instance_name)
11414
    assert self.instance is not None, \
11415
          "Cannot retrieve locked instance %s" % self.op.instance_name
11416
    _CheckNodeOnline(self, self.instance.primary_node)
11417

    
11418
    self._cds = _GetClusterDomainSecret()
11419

    
11420
  def Exec(self, feedback_fn):
11421
    """Prepares an instance for an export.
11422

11423
    """
11424
    instance = self.instance
11425

    
11426
    if self.op.mode == constants.EXPORT_MODE_REMOTE:
11427
      salt = utils.GenerateSecret(8)
11428

    
11429
      feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
11430
      result = self.rpc.call_x509_cert_create(instance.primary_node,
11431
                                              constants.RIE_CERT_VALIDITY)
11432
      result.Raise("Can't create X509 key and certificate on %s" % result.node)
11433

    
11434
      (name, cert_pem) = result.payload
11435

    
11436
      cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
11437
                                             cert_pem)
11438

    
11439
      return {
11440
        "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
11441
        "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
11442
                          salt),
11443
        "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
11444
        }
11445

    
11446
    return None
11447

    
11448

    
11449
class LUBackupExport(LogicalUnit):
11450
  """Export an instance to an image in the cluster.
11451

11452
  """
11453
  HPATH = "instance-export"
11454
  HTYPE = constants.HTYPE_INSTANCE
11455
  REQ_BGL = False
11456

    
11457
  def CheckArguments(self):
11458
    """Check the arguments.
11459

11460
    """
11461
    self.x509_key_name = self.op.x509_key_name
11462
    self.dest_x509_ca_pem = self.op.destination_x509_ca
11463

    
11464
    if self.op.mode == constants.EXPORT_MODE_REMOTE:
11465
      if not self.x509_key_name:
11466
        raise errors.OpPrereqError("Missing X509 key name for encryption",
11467
                                   errors.ECODE_INVAL)
11468

    
11469
      if not self.dest_x509_ca_pem:
11470
        raise errors.OpPrereqError("Missing destination X509 CA",
11471
                                   errors.ECODE_INVAL)
11472

    
11473
  def ExpandNames(self):
11474
    self._ExpandAndLockInstance()
11475

    
11476
    # Lock all nodes for local exports
11477
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
11478
      # FIXME: lock only instance primary and destination node
11479
      #
11480
      # Sad but true, for now we have to lock all nodes, as we don't know where
11481
      # the previous export might be, and in this LU we search for it and
11482
      # remove it from its current node. In the future we could fix this by:
11483
      #  - making a tasklet to search (share-lock all), then create the
11484
      #    new one, then one to remove, after
11485
      #  - removing the removal operation altogether
11486
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
11487

    
11488
  def DeclareLocks(self, level):
11489
    """Last minute lock declaration."""
11490
    # All nodes are locked anyway, so nothing to do here.
11491

    
11492
  def BuildHooksEnv(self):
11493
    """Build hooks env.
11494

11495
    This will run on the master, primary node and target node.
11496

11497
    """
11498
    env = {
11499
      "EXPORT_MODE": self.op.mode,
11500
      "EXPORT_NODE": self.op.target_node,
11501
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
11502
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
11503
      # TODO: Generic function for boolean env variables
11504
      "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
11505
      }
11506

    
11507
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
11508

    
11509
    return env
11510

    
11511
  def BuildHooksNodes(self):
11512
    """Build hooks nodes.
11513

11514
    """
11515
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
11516

    
11517
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
11518
      nl.append(self.op.target_node)
11519

    
11520
    return (nl, nl)
11521

    
11522
  def CheckPrereq(self):
11523
    """Check prerequisites.
11524

11525
    This checks that the instance and node names are valid.
11526

11527
    """
11528
    instance_name = self.op.instance_name
11529

    
11530
    self.instance = self.cfg.GetInstanceInfo(instance_name)
11531
    assert self.instance is not None, \
11532
          "Cannot retrieve locked instance %s" % self.op.instance_name
11533
    _CheckNodeOnline(self, self.instance.primary_node)
11534

    
11535
    if (self.op.remove_instance and self.instance.admin_up and
11536
        not self.op.shutdown):
11537
      raise errors.OpPrereqError("Can not remove instance without shutting it"
11538
                                 " down before")
11539

    
11540
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
11541
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
11542
      self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
11543
      assert self.dst_node is not None
11544

    
11545
      _CheckNodeOnline(self, self.dst_node.name)
11546
      _CheckNodeNotDrained(self, self.dst_node.name)
11547

    
11548
      self._cds = None
11549
      self.dest_disk_info = None
11550
      self.dest_x509_ca = None
11551

    
11552
    elif self.op.mode == constants.EXPORT_MODE_REMOTE:
11553
      self.dst_node = None
11554

    
11555
      if len(self.op.target_node) != len(self.instance.disks):
11556
        raise errors.OpPrereqError(("Received destination information for %s"
11557
                                    " disks, but instance %s has %s disks") %
11558
                                   (len(self.op.target_node), instance_name,
11559
                                    len(self.instance.disks)),
11560
                                   errors.ECODE_INVAL)
11561

    
11562
      cds = _GetClusterDomainSecret()
11563

    
11564
      # Check X509 key name
11565
      try:
11566
        (key_name, hmac_digest, hmac_salt) = self.x509_key_name
11567
      except (TypeError, ValueError), err:
11568
        raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)
11569

    
11570
      if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
11571
        raise errors.OpPrereqError("HMAC for X509 key name is wrong",
11572
                                   errors.ECODE_INVAL)
11573
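      # The key name travels as a (name, hmac, salt) triple; conceptually the
      # check above is just (hypothetical values):
      #   utils.VerifySha1Hmac(cds, "x509-key-example", hmac_digest,
      #                        salt=hmac_salt)
      # i.e. the digest must have been produced with the cluster domain
      # secret, which is what LUBackupPrepare used when it handed out the key
      # name and salt.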

    
11574
      # Load and verify CA
11575
      try:
11576
        (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
11577
      except OpenSSL.crypto.Error, err:
11578
        raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
11579
                                   (err, ), errors.ECODE_INVAL)
11580

    
11581
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
11582
      if errcode is not None:
11583
        raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
11584
                                   (msg, ), errors.ECODE_INVAL)
11585

    
11586
      self.dest_x509_ca = cert
11587

    
11588
      # Verify target information
11589
      disk_info = []
11590
      for idx, disk_data in enumerate(self.op.target_node):
11591
        try:
11592
          (host, port, magic) = \
11593
            masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
11594
        except errors.GenericError, err:
11595
          raise errors.OpPrereqError("Target info for disk %s: %s" %
11596
                                     (idx, err), errors.ECODE_INVAL)
11597

    
11598
        disk_info.append((host, port, magic))
11599

    
11600
      assert len(disk_info) == len(self.op.target_node)
11601
      self.dest_disk_info = disk_info
11602

    
11603
    else:
11604
      raise errors.ProgrammerError("Unhandled export mode %r" %
11605
                                   self.op.mode)
11606

    
11607
    # instance disk type verification
11608
    # TODO: Implement export support for file-based disks
11609
    for disk in self.instance.disks:
11610
      if disk.dev_type == constants.LD_FILE:
11611
        raise errors.OpPrereqError("Export not supported for instances with"
11612
                                   " file-based disks", errors.ECODE_INVAL)
11613

    
11614
  def _CleanupExports(self, feedback_fn):
11615
    """Removes exports of current instance from all other nodes.
11616

11617
    If an instance in a cluster with nodes A..D was exported to node C, its
11618
    exports will be removed from the nodes A, B and D.
11619

11620
    """
11621
    assert self.op.mode != constants.EXPORT_MODE_REMOTE
11622

    
11623
    nodelist = self.cfg.GetNodeList()
11624
    nodelist.remove(self.dst_node.name)
11625

    
11626
    # On one-node clusters nodelist will be empty after the removal;
11627
    # if we proceeded, the backup would be removed, because OpBackupQuery
11628
    # substitutes an empty list with the full cluster node list.
11629
    iname = self.instance.name
11630
    if nodelist:
11631
      feedback_fn("Removing old exports for instance %s" % iname)
11632
      exportlist = self.rpc.call_export_list(nodelist)
11633
      for node in exportlist:
11634
        if exportlist[node].fail_msg:
11635
          continue
11636
        if iname in exportlist[node].payload:
11637
          msg = self.rpc.call_export_remove(node, iname).fail_msg
11638
          if msg:
11639
            self.LogWarning("Could not remove older export for instance %s"
11640
                            " on node %s: %s", iname, node, msg)
11641

    
11642
  def Exec(self, feedback_fn):
11643
    """Export an instance to an image in the cluster.
11644

11645
    """
11646
    assert self.op.mode in constants.EXPORT_MODES
11647

    
11648
    instance = self.instance
11649
    src_node = instance.primary_node
11650

    
11651
    if self.op.shutdown:
11652
      # shutdown the instance, but not the disks
11653
      feedback_fn("Shutting down instance %s" % instance.name)
11654
      result = self.rpc.call_instance_shutdown(src_node, instance,
11655
                                               self.op.shutdown_timeout)
11656
      # TODO: Maybe ignore failures if ignore_remove_failures is set
11657
      result.Raise("Could not shutdown instance %s on"
11658
                   " node %s" % (instance.name, src_node))
11659

    
11660
    # set the disks ID correctly since call_instance_start needs the
11661
    # correct drbd minor to create the symlinks
11662
    for disk in instance.disks:
11663
      self.cfg.SetDiskID(disk, src_node)
11664

    
11665
    activate_disks = (not instance.admin_up)
11666

    
11667
    if activate_disks:
11668
      # Activate the instance disks if we're exporting a stopped instance
11669
      feedback_fn("Activating disks for %s" % instance.name)
11670
      _StartInstanceDisks(self, instance, None)
11671

    
11672
    try:
11673
      helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
11674
                                                     instance)
11675

    
11676
      helper.CreateSnapshots()
11677
      try:
11678
        if (self.op.shutdown and instance.admin_up and
11679
            not self.op.remove_instance):
11680
          assert not activate_disks
11681
          feedback_fn("Starting instance %s" % instance.name)
11682
          result = self.rpc.call_instance_start(src_node, instance,
11683
                                                None, None, False)
11684
          msg = result.fail_msg
11685
          if msg:
11686
            feedback_fn("Failed to start instance: %s" % msg)
11687
            _ShutdownInstanceDisks(self, instance)
11688
            raise errors.OpExecError("Could not start instance: %s" % msg)
11689

    
11690
        if self.op.mode == constants.EXPORT_MODE_LOCAL:
11691
          (fin_resu, dresults) = helper.LocalExport(self.dst_node)
11692
        elif self.op.mode == constants.EXPORT_MODE_REMOTE:
11693
          connect_timeout = constants.RIE_CONNECT_TIMEOUT
11694
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
11695

    
11696
          (key_name, _, _) = self.x509_key_name
11697

    
11698
          dest_ca_pem = \
11699
            OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
11700
                                            self.dest_x509_ca)
11701

    
11702
          (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
11703
                                                     key_name, dest_ca_pem,
11704
                                                     timeouts)
11705
      finally:
11706
        helper.Cleanup()
11707

    
11708
      # Check for backwards compatibility
11709
      assert len(dresults) == len(instance.disks)
11710
      assert compat.all(isinstance(i, bool) for i in dresults), \
11711
             "Not all results are boolean: %r" % dresults
11712

    
11713
    finally:
11714
      if activate_disks:
11715
        feedback_fn("Deactivating disks for %s" % instance.name)
11716
        _ShutdownInstanceDisks(self, instance)
11717

    
11718
    if not (compat.all(dresults) and fin_resu):
11719
      failures = []
11720
      if not fin_resu:
11721
        failures.append("export finalization")
11722
      if not compat.all(dresults):
11723
        fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
11724
                               if not dsk)
11725
        failures.append("disk export: disk(s) %s" % fdsk)
11726

    
11727
      raise errors.OpExecError("Export failed, errors in %s" %
11728
                               utils.CommaJoin(failures))
11729

    
11730
    # At this point, the export was successful, we can cleanup/finish
11731

    
11732
    # Remove instance if requested
11733
    if self.op.remove_instance:
11734
      feedback_fn("Removing instance %s" % instance.name)
11735
      _RemoveInstance(self, feedback_fn, instance,
11736
                      self.op.ignore_remove_failures)
11737

    
11738
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
11739
      self._CleanupExports(feedback_fn)
11740

    
11741
    return fin_resu, dresults
11742

    
11743

    
11744
class LUBackupRemove(NoHooksLU):
11745
  """Remove exports related to the named instance.
11746

11747
  """
11748
  REQ_BGL = False
11749

    
11750
  def ExpandNames(self):
11751
    self.needed_locks = {}
11752
    # We need all nodes to be locked in order for RemoveExport to work, but we
11753
    # don't need to lock the instance itself, as nothing will happen to it (and
11754
    # we can remove exports also for a removed instance)
11755
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
11756

    
11757
  def Exec(self, feedback_fn):
11758
    """Remove any export.
11759

11760
    """
11761
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
11762
    # If the instance was not found we'll try with the name that was passed in.
11763
    # This will only work if it was an FQDN, though.
11764
    fqdn_warn = False
11765
    if not instance_name:
11766
      fqdn_warn = True
11767
      instance_name = self.op.instance_name
11768

    
11769
    locked_nodes = self.owned_locks(locking.LEVEL_NODE)
11770
    exportlist = self.rpc.call_export_list(locked_nodes)
11771
    found = False
11772
    for node in exportlist:
11773
      msg = exportlist[node].fail_msg
11774
      if msg:
11775
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
11776
        continue
11777
      if instance_name in exportlist[node].payload:
11778
        found = True
11779
        result = self.rpc.call_export_remove(node, instance_name)
11780
        msg = result.fail_msg
11781
        if msg:
11782
          logging.error("Could not remove export for instance %s"
11783
                        " on node %s: %s", instance_name, node, msg)
11784

    
11785
    if fqdn_warn and not found:
11786
      feedback_fn("Export not found. If trying to remove an export belonging"
11787
                  " to a deleted instance please use its Fully Qualified"
11788
                  " Domain Name.")
11789

    
11790

    
11791
class LUGroupAdd(LogicalUnit):
11792
  """Logical unit for creating node groups.
11793

11794
  """
11795
  HPATH = "group-add"
11796
  HTYPE = constants.HTYPE_GROUP
11797
  REQ_BGL = False
11798

    
11799
  def ExpandNames(self):
11800
    # We need the new group's UUID here so that we can create and acquire the
11801
    # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
11802
    # that it should not check whether the UUID exists in the configuration.
11803
    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
11804
    self.needed_locks = {}
11805
    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
11806

    
11807
  def CheckPrereq(self):
11808
    """Check prerequisites.
11809

11810
    This checks that the given group name is not an existing node group
11811
    already.
11812

11813
    """
11814
    try:
11815
      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
11816
    except errors.OpPrereqError:
11817
      pass
11818
    else:
11819
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
11820
                                 " node group (UUID: %s)" %
11821
                                 (self.op.group_name, existing_uuid),
11822
                                 errors.ECODE_EXISTS)
11823

    
11824
    if self.op.ndparams:
11825
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
11826

    
11827
  def BuildHooksEnv(self):
11828
    """Build hooks env.
11829

11830
    """
11831
    return {
11832
      "GROUP_NAME": self.op.group_name,
11833
      }
11834

    
11835
  def BuildHooksNodes(self):
11836
    """Build hooks nodes.
11837

11838
    """
11839
    mn = self.cfg.GetMasterNode()
11840
    return ([mn], [mn])
11841

    
11842
  def Exec(self, feedback_fn):
11843
    """Add the node group to the cluster.
11844

11845
    """
11846
    group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
11847
                                  uuid=self.group_uuid,
11848
                                  alloc_policy=self.op.alloc_policy,
11849
                                  ndparams=self.op.ndparams)
11850

    
11851
    self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
11852
    del self.remove_locks[locking.LEVEL_NODEGROUP]
11853

    
11854

    
11855
class LUGroupAssignNodes(NoHooksLU):
11856
  """Logical unit for assigning nodes to groups.
11857

11858
  """
11859
  REQ_BGL = False
11860

    
11861
  def ExpandNames(self):
11862
    # These raise errors.OpPrereqError on their own:
11863
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
11864
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
11865

    
11866
    # We want to lock all the affected nodes and groups. We have readily
11867
    # available the list of nodes, and the *destination* group. To gather the
11868
    # list of "source" groups, we need to fetch node information later on.
11869
    self.needed_locks = {
11870
      locking.LEVEL_NODEGROUP: set([self.group_uuid]),
11871
      locking.LEVEL_NODE: self.op.nodes,
11872
      }
11873

    
11874
  def DeclareLocks(self, level):
11875
    if level == locking.LEVEL_NODEGROUP:
11876
      assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1
11877

    
11878
      # Try to get all affected nodes' groups without having the group or node
11879
      # lock yet. Needs verification later in the code flow.
11880
      groups = self.cfg.GetNodeGroupsFromNodes(self.op.nodes)
11881

    
11882
      self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
11883

    
11884
  def CheckPrereq(self):
11885
    """Check prerequisites.
11886

11887
    """
11888
    assert self.needed_locks[locking.LEVEL_NODEGROUP]
11889
    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
11890
            frozenset(self.op.nodes))
11891

    
11892
    expected_locks = (set([self.group_uuid]) |
11893
                      self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
11894
    actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
11895
    if actual_locks != expected_locks:
11896
      raise errors.OpExecError("Nodes changed groups since locks were acquired,"
11897
                               " current groups are '%s', used to be '%s'" %
11898
                               (utils.CommaJoin(expected_locks),
11899
                                utils.CommaJoin(actual_locks)))
11900

    
11901
    self.node_data = self.cfg.GetAllNodesInfo()
11902
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
11903
    instance_data = self.cfg.GetAllInstancesInfo()
11904

    
11905
    if self.group is None:
11906
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
11907
                               (self.op.group_name, self.group_uuid))
11908

    
11909
    (new_splits, previous_splits) = \
11910
      self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
11911
                                             for node in self.op.nodes],
11912
                                            self.node_data, instance_data)
11913

    
11914
    if new_splits:
11915
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
11916

    
11917
      if not self.op.force:
11918
        raise errors.OpExecError("The following instances get split by this"
11919
                                 " change and --force was not given: %s" %
11920
                                 fmt_new_splits)
11921
      else:
11922
        self.LogWarning("This operation will split the following instances: %s",
11923
                        fmt_new_splits)
11924

    
11925
        if previous_splits:
11926
          self.LogWarning("In addition, these already-split instances continue"
11927
                          " to be split across groups: %s",
11928
                          utils.CommaJoin(utils.NiceSort(previous_splits)))
11929

    
11930
  def Exec(self, feedback_fn):
11931
    """Assign nodes to a new group.
11932

11933
    """
11934
    for node in self.op.nodes:
11935
      self.node_data[node].group = self.group_uuid
11936

    
11937
    # FIXME: Depends on side-effects of modifying the result of
11938
    # C{cfg.GetAllNodesInfo}
11939

    
11940
    self.cfg.Update(self.group, feedback_fn) # Saves all modified nodes.
11941

    
11942
  @staticmethod
11943
  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
11944
    """Check for split instances after a node assignment.
11945

11946
    This method considers a series of node assignments as an atomic operation,
11947
    and returns information about split instances after applying the set of
11948
    changes.
11949

11950
    In particular, it returns information about newly split instances, and
11951
    instances that were already split, and remain so after the change.
11952

11953
    Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
11954
    considered.
11955

11956
    @type changes: list of (node_name, new_group_uuid) pairs.
11957
    @param changes: list of node assignments to consider.
11958
    @param node_data: a dict with data for all nodes
11959
    @param instance_data: a dict with all instances to consider
11960
    @rtype: a two-tuple
11961
    @return: a list of instances that were previously okay and become split as a
11962
      consequence of this change, and a list of instances that were previously
11963
      split and this change does not fix.
11964

11965
    """
11966
    changed_nodes = dict((node, group) for node, group in changes
11967
                         if node_data[node].group != group)
11968

    
11969
    all_split_instances = set()
11970
    previously_split_instances = set()
11971

    
11972
    def InstanceNodes(instance):
11973
      return [instance.primary_node] + list(instance.secondary_nodes)
11974

    
11975
    for inst in instance_data.values():
11976
      if inst.disk_template not in constants.DTS_INT_MIRROR:
11977
        continue
11978

    
11979
      instance_nodes = InstanceNodes(inst)
11980

    
11981
      if len(set(node_data[node].group for node in instance_nodes)) > 1:
11982
        previously_split_instances.add(inst.name)
11983

    
11984
      if len(set(changed_nodes.get(node, node_data[node].group)
11985
                 for node in instance_nodes)) > 1:
11986
        all_split_instances.add(inst.name)
11987

    
11988
    return (list(all_split_instances - previously_split_instances),
11989
            list(previously_split_instances & all_split_instances))
11990
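  # Worked example with made-up data: nodes n1 and n2 both start in group A
  # and host the DRBD instance "inst1" (primary n1, secondary n2).  Assigning
  # only n1 to group B makes "inst1" newly split (its nodes now span A and B),
  # so it shows up in the first returned list; had it already spanned two
  # groups before the change and still afterwards, it would appear in the
  # second list instead.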

    
11991

    
11992
class _GroupQuery(_QueryBase):
11993
  FIELDS = query.GROUP_FIELDS
11994

    
11995
  def ExpandNames(self, lu):
11996
    lu.needed_locks = {}
11997

    
11998
    self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
11999
    name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
12000

    
12001
    if not self.names:
12002
      self.wanted = [name_to_uuid[name]
12003
                     for name in utils.NiceSort(name_to_uuid.keys())]
12004
    else:
12005
      # Accept names to be either names or UUIDs.
12006
      missing = []
12007
      self.wanted = []
12008
      all_uuid = frozenset(self._all_groups.keys())
12009

    
12010
      for name in self.names:
12011
        if name in all_uuid:
12012
          self.wanted.append(name)
12013
        elif name in name_to_uuid:
12014
          self.wanted.append(name_to_uuid[name])
12015
        else:
12016
          missing.append(name)
12017

    
12018
      if missing:
12019
        raise errors.OpPrereqError("Some groups do not exist: %s" %
12020
                                   utils.CommaJoin(missing),
12021
                                   errors.ECODE_NOENT)
12022

    
12023
  def DeclareLocks(self, lu, level):
12024
    pass
12025

    
12026
  def _GetQueryData(self, lu):
12027
    """Computes the list of node groups and their attributes.
12028

12029
    """
12030
    do_nodes = query.GQ_NODE in self.requested_data
12031
    do_instances = query.GQ_INST in self.requested_data
12032

    
12033
    group_to_nodes = None
12034
    group_to_instances = None
12035

    
12036
    # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
12037
    # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
12038
    # latter GetAllInstancesInfo() is not enough, for we have to go through
12039
    # instance->node. Hence, we will need to process nodes even if we only need
12040
    # instance information.
12041
    if do_nodes or do_instances:
12042
      all_nodes = lu.cfg.GetAllNodesInfo()
12043
      group_to_nodes = dict((uuid, []) for uuid in self.wanted)
12044
      node_to_group = {}
12045

    
12046
      for node in all_nodes.values():
12047
        if node.group in group_to_nodes:
12048
          group_to_nodes[node.group].append(node.name)
12049
          node_to_group[node.name] = node.group
12050

    
12051
      if do_instances:
12052
        all_instances = lu.cfg.GetAllInstancesInfo()
12053
        group_to_instances = dict((uuid, []) for uuid in self.wanted)
12054

    
12055
        for instance in all_instances.values():
12056
          node = instance.primary_node
12057
          if node in node_to_group:
12058
            group_to_instances[node_to_group[node]].append(instance.name)
12059

    
12060
        if not do_nodes:
12061
          # Do not pass on node information if it was not requested.
12062
          group_to_nodes = None
12063

    
12064
    return query.GroupQueryData([self._all_groups[uuid]
12065
                                 for uuid in self.wanted],
12066
                                group_to_nodes, group_to_instances)
12067
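  # The mappings handed to GroupQueryData, sketched with invented UUIDs:
  #   group_to_nodes     = {"uuid-a": ["node1", "node2"], "uuid-b": ["node3"]}
  #   group_to_instances = {"uuid-a": ["inst1"], "uuid-b": []}
  # group_to_nodes is reset to None when node data was not actually requested,
  # as done just above.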

    
12068

    
12069
class LUGroupQuery(NoHooksLU):
12070
  """Logical unit for querying node groups.
12071

12072
  """
12073
  REQ_BGL = False
12074

    
12075
  def CheckArguments(self):
12076
    self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
12077
                          self.op.output_fields, False)
12078

    
12079
  def ExpandNames(self):
12080
    self.gq.ExpandNames(self)
12081

    
12082
  def DeclareLocks(self, level):
12083
    self.gq.DeclareLocks(self, level)
12084

    
12085
  def Exec(self, feedback_fn):
12086
    return self.gq.OldStyleQuery(self)
12087

    
12088

    
12089
class LUGroupSetParams(LogicalUnit):
  """Modifies the parameters of a node group.

  """
  HPATH = "group-modify"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def CheckArguments(self):
    all_changes = [
      self.op.ndparams,
      self.op.alloc_policy,
      ]

    if all_changes.count(None) == len(all_changes):
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    """
    self.group = self.cfg.GetNodeGroup(self.group_uuid)

    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    if self.op.ndparams:
      new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = new_ndparams

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      "NEW_ALLOC_POLICY": self.op.alloc_policy,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Modifies the node group.

    """
    result = []

    if self.op.ndparams:
      self.group.ndparams = self.new_ndparams
      result.append(("ndparams", str(self.group.ndparams)))

    if self.op.alloc_policy:
      self.group.alloc_policy = self.op.alloc_policy

    self.cfg.Update(self.group, feedback_fn)
    return result


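# Added usage sketch for LUGroupSetParams above (an assumption, not part of
# the original module): the LU is driven by an OpGroupSetParams opcode, so a
# caller would submit something like
#   opcodes.OpGroupSetParams(group_name="default",
#                            alloc_policy=constants.ALLOC_POLICY_LAST_RESORT)
# and Exec() reports the applied changes as a list of (name, value) pairs.

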
class LUGroupRemove(LogicalUnit):
  HPATH = "group-remove"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given group name exists as a node group, that it is
    empty (i.e., contains no nodes), and that it is not the last group of the
    cluster.

    """
    # Verify that the group is empty.
    group_nodes = [node.name
                   for node in self.cfg.GetAllNodesInfo().values()
                   if node.group == self.group_uuid]

    if group_nodes:
      raise errors.OpPrereqError("Group '%s' not empty, has the following"
                                 " nodes: %s" %
                                 (self.op.group_name,
                                  utils.CommaJoin(utils.NiceSort(group_nodes))),
                                 errors.ECODE_STATE)

    # Verify the cluster would not be left group-less.
    if len(self.cfg.GetNodeGroupList()) == 1:
      raise errors.OpPrereqError("Group '%s' is the only group,"
                                 " cannot be removed" %
                                 self.op.group_name,
                                 errors.ECODE_STATE)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Remove the node group.

    """
    try:
      self.cfg.RemoveNodeGroup(self.group_uuid)
    except errors.ConfigurationError:
      raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
                               (self.op.group_name, self.group_uuid))

    self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid


class LUGroupRename(LogicalUnit):
  HPATH = "group-rename"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    Ensures requested new name is not yet used.

    """
    try:
      new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
                                 " node group (UUID: %s)" %
                                 (self.op.new_name, new_name_uuid),
                                 errors.ECODE_EXISTS)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OLD_NAME": self.op.group_name,
      "NEW_NAME": self.op.new_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()

    all_nodes = self.cfg.GetAllNodesInfo()
    all_nodes.pop(mn, None)

    run_nodes = [mn]
    run_nodes.extend(node.name for node in all_nodes.values()
                     if node.group == self.group_uuid)

    return (run_nodes, run_nodes)

  def Exec(self, feedback_fn):
    """Rename the node group.

    """
    group = self.cfg.GetNodeGroup(self.group_uuid)

    if group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    group.name = self.op.new_name
    self.cfg.Update(group, feedback_fn)

    return self.op.new_name


class LUGroupEvacuate(LogicalUnit):
  HPATH = "group-evacuate"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = []

    if self.group_uuid in self.req_target_uuids:
      raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
                                 " as a target group (targets are %s)" %
                                 (self.group_uuid,
                                  utils.CommaJoin(self.req_target_uuids)),
                                 errors.ECODE_INVAL)

    self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)

    self.share_locks = _ShareAll()
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetNodeGroupInstances(self.group_uuid)

    elif level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set([self.group_uuid] + self.req_target_uuids)

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        lock_groups.update(group_uuid
                           for instance_name in
                             self.owned_locks(locking.LEVEL_INSTANCE)
                           for group_uuid in
                             self.cfg.GetInstanceNodeGroups(instance_name))
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      # This will only lock the nodes in the group to be evacuated which
      # contain actual instances
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
      self._LockInstancesNodes()

      # Lock all nodes in group to be evacuated and target groups
      owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
      assert self.group_uuid in owned_groups
      member_nodes = [node_name
                      for group in owned_groups
                      for node_name in self.cfg.GetNodeGroup(group).members]
      self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)

  def CheckPrereq(self):
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert owned_groups.issuperset(self.req_target_uuids)
    assert self.group_uuid in owned_groups

    # Check if locked instances are still correct
    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)

    # Get instance information
    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))

    # Check if node groups for locked instances are still correct
    for instance_name in owned_instances:
      inst = self.instances[instance_name]
      assert owned_nodes.issuperset(inst.all_nodes), \
        "Instance %s's nodes changed while we kept the lock" % instance_name

      inst_groups = _CheckInstanceNodeGroups(self.cfg, instance_name,
                                             owned_groups)

      assert self.group_uuid in inst_groups, \
        "Instance %s has no node in group %s" % (instance_name, self.group_uuid)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = self.req_target_uuids
    else:
      # All groups except the one to be evacuated are potential targets
      self.target_uuids = [group_uuid for group_uuid in owned_groups
                           if group_uuid != self.group_uuid]

      if not self.target_uuids:
        raise errors.OpPrereqError("There are no possible target groups",
                                   errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()

    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)

    run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members

    return (run_nodes, run_nodes)

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert self.group_uuid not in self.target_uuids

    ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
                     instances=instances, target_groups=self.target_uuids)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute group evacuation using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)

    jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
                 len(jobs), self.op.group_name)

    return ResultWithJobs(jobs)


class TagsLU(NoHooksLU): # pylint: disable=W0223
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """
  def ExpandNames(self):
    self.group_uuid = None
    self.needed_locks = {}
    if self.op.kind == constants.TAG_NODE:
      self.op.name = _ExpandNodeName(self.cfg, self.op.name)
      self.needed_locks[locking.LEVEL_NODE] = self.op.name
    elif self.op.kind == constants.TAG_INSTANCE:
      self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
    elif self.op.kind == constants.TAG_NODEGROUP:
      self.group_uuid = self.cfg.LookupNodeGroup(self.op.name)

    # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
    # not possible to acquire the BGL based on opcode parameters)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif self.op.kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif self.op.kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    elif self.op.kind == constants.TAG_NODEGROUP:
      self.target = self.cfg.GetNodeGroup(self.group_uuid)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind), errors.ECODE_INVAL)


class LUTagsGet(TagsLU):
  """Returns the tags of a given object.

  """
  REQ_BGL = False

  def ExpandNames(self):
    TagsLU.ExpandNames(self)

    # Share locks as this is only a read operation
    self.share_locks = _ShareAll()

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    return list(self.target.GetTags())


class LUTagsSearch(NoHooksLU):
  """Searches the tags for a given pattern.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the pattern passed for validity by compiling it.

    """
    try:
      self.re = re.compile(self.op.pattern)
    except re.error, err:
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
                                 (self.op.pattern, err), errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Returns the matching (path, tag) pairs.

    """
    cfg = self.cfg
    tgts = [("/cluster", cfg.GetClusterInfo())]
    ilist = cfg.GetAllInstancesInfo().values()
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
    nlist = cfg.GetAllNodesInfo().values()
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
    tgts.extend(("/nodegroup/%s" % n.name, n)
                for n in cfg.GetAllNodeGroupsInfo().values())
    results = []
    for path, target in tgts:
      for tag in target.GetTags():
        if self.re.search(tag):
          results.append((path, tag))
    return results


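# Added illustration of the result format of LUTagsSearch above (not part of
# the original module): for an instance "web1" tagged "env:prod", a search for
# the pattern "env:.*" yields entries such as ("/instances/web1", "env:prod");
# cluster, node and node group tags appear under "/cluster", "/nodes/<name>"
# and "/nodegroup/<name>" respectively.

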
class LUTagsSet(TagsLU):
  """Sets a tag on a given object.

  """
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the type and length of the tag name and value.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

  def Exec(self, feedback_fn):
    """Sets the tag.

    """
    try:
      for tag in self.op.tags:
        self.target.AddTag(tag)
    except errors.TagError, err:
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
    self.cfg.Update(self.target, feedback_fn)


class LUTagsDel(TagsLU):
  """Delete a list of tags from a given object.

  """
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()

    diff_tags = del_tags - cur_tags
    if diff_tags:
      diff_names = ("'%s'" % i for i in sorted(diff_tags))
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (utils.CommaJoin(diff_names), ),
                                 errors.ECODE_NOENT)

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    self.cfg.Update(self.target, feedback_fn)


class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def _TestDelay(self):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      for node, node_result in result.items():
        node_result.Raise("Failure during rpc call to node %s" % node)

  def Exec(self, feedback_fn):
    """Execute the test delay opcode, with the wanted repetitions.

    """
    if self.op.repeat == 0:
      self._TestDelay()
    else:
      top_value = self.op.repeat - 1
      for i in range(self.op.repeat):
        self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
        self._TestDelay()


class LUTestJqueue(NoHooksLU):
  """Utility LU to test some aspects of the job queue.

  """
  REQ_BGL = False

  # Must be lower than default timeout for WaitForJobChange to see whether it
  # notices changed jobs
  _CLIENT_CONNECT_TIMEOUT = 20.0
  _CLIENT_CONFIRM_TIMEOUT = 60.0

  @classmethod
  def _NotifyUsingSocket(cls, cb, errcls):
    """Opens a Unix socket and waits for another program to connect.

    @type cb: callable
    @param cb: Callback to send socket name to client
    @type errcls: class
    @param errcls: Exception class to use for errors

    """
    # Using a temporary directory as there's no easy way to create temporary
    # sockets without writing a custom loop around tempfile.mktemp and
    # socket.bind
    tmpdir = tempfile.mkdtemp()
    try:
      tmpsock = utils.PathJoin(tmpdir, "sock")

      logging.debug("Creating temporary socket at %s", tmpsock)
      sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
      try:
        sock.bind(tmpsock)
        sock.listen(1)

        # Send details to client
        cb(tmpsock)

        # Wait for client to connect before continuing
        sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
        try:
          (conn, _) = sock.accept()
        except socket.error, err:
          raise errcls("Client didn't connect in time (%s)" % err)
      finally:
        sock.close()
    finally:
      # Remove as soon as client is connected
      shutil.rmtree(tmpdir)

    # Wait for client to close
    try:
      try:
        # pylint: disable=E1101
        # Instance of '_socketobject' has no ... member
        conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
        conn.recv(1)
      except socket.error, err:
        raise errcls("Client failed to confirm notification (%s)" % err)
    finally:
      conn.close()

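  # Added summary of the handshake above (derived from the code, not part of
  # the original module): the callback is given the temporary socket path, the
  # test client must connect within _CLIENT_CONNECT_TIMEOUT seconds, and the
  # notification counts as confirmed once the client sends data or closes the
  # connection within _CLIENT_CONFIRM_TIMEOUT seconds.
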
  def _SendNotification(self, test, arg, sockname):
    """Sends a notification to the client.

    @type test: string
    @param test: Test name
    @param arg: Test argument (depends on test)
    @type sockname: string
    @param sockname: Socket path

    """
    self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))

  def _Notify(self, prereq, test, arg):
    """Notifies the client of a test.

    @type prereq: bool
    @param prereq: Whether this is a prereq-phase test
    @type test: string
    @param test: Test name
    @param arg: Test argument (depends on test)

    """
    if prereq:
      errcls = errors.OpPrereqError
    else:
      errcls = errors.OpExecError

    return self._NotifyUsingSocket(compat.partial(self._SendNotification,
                                                  test, arg),
                                   errcls)

  def CheckArguments(self):
    self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
    self.expandnames_calls = 0

  def ExpandNames(self):
    checkargs_calls = getattr(self, "checkargs_calls", 0)
    if checkargs_calls < 1:
      raise errors.ProgrammerError("CheckArguments was not called")

    self.expandnames_calls += 1

    if self.op.notify_waitlock:
      self._Notify(True, constants.JQT_EXPANDNAMES, None)

    self.LogInfo("Expanding names")

    # Get lock on master node (just to get a lock, not for a particular reason)
    self.needed_locks = {
      locking.LEVEL_NODE: self.cfg.GetMasterNode(),
      }

  def Exec(self, feedback_fn):
    if self.expandnames_calls < 1:
      raise errors.ProgrammerError("ExpandNames was not called")

    if self.op.notify_exec:
      self._Notify(False, constants.JQT_EXEC, None)

    self.LogInfo("Executing")

    if self.op.log_messages:
      self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
      for idx, msg in enumerate(self.op.log_messages):
        self.LogInfo("Sending log message %s", idx + 1)
        feedback_fn(constants.JQT_MSGPREFIX + msg)
        # Report how many test messages have been sent
        self._Notify(False, constants.JQT_LOGMSG, idx + 1)

    if self.op.fail:
      raise errors.OpExecError("Opcode failure was requested")

    return True


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has several sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  # pylint: disable=R0902
  # lots of instance attributes

  def __init__(self, cfg, rpc, mode, **kwargs):
    self.cfg = cfg
    self.rpc = rpc
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.memory = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    self.name = None
    self.instances = None
    self.evac_mode = None
    self.target_groups = []
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.result = None

    try:
      (fn, keydata, self._result_check) = self._MODE_DATA[self.mode]
    except KeyError:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)

    keyset = [n for (n, _) in keydata]

    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])

    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData(compat.partial(fn, self), keydata)

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    ninfo = cfg.GetAllNodesInfo()
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_list = [n.name for n in ninfo.values() if n.vm_capable]

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
    else:
      hypervisor_name = cluster_info.enabled_hypervisors[0]

    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
                                        hypervisor_name)
    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors)

    data["nodegroups"] = self._ComputeNodeGroupData(cfg)

    config_ndata = self._ComputeBasicNodeData(ninfo)
    data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
                                                 i_list, config_ndata)
    assert len(data["nodes"]) == len(ninfo), \
        "Incomplete node data computed"

    data["instances"] = self._ComputeInstanceData(cluster_info, i_list)

    self.in_data = data

  @staticmethod
  def _ComputeNodeGroupData(cfg):
    """Compute node groups data.

    """
    ng = dict((guuid, {
      "name": gdata.name,
      "alloc_policy": gdata.alloc_policy,
      })
      for guuid, gdata in cfg.GetAllNodeGroupsInfo().items())

    return ng

  @staticmethod
  def _ComputeBasicNodeData(node_cfg):
    """Compute global node data.

    @rtype: dict
    @returns: a dict mapping node names to their static configuration data

    """
    # fill in static (config-based) values
    node_results = dict((ninfo.name, {
      "tags": list(ninfo.GetTags()),
      "primary_ip": ninfo.primary_ip,
      "secondary_ip": ninfo.secondary_ip,
      "offline": ninfo.offline,
      "drained": ninfo.drained,
      "master_candidate": ninfo.master_candidate,
      "group": ninfo.group,
      "master_capable": ninfo.master_capable,
      "vm_capable": ninfo.vm_capable,
      })
      for ninfo in node_cfg.values())

    return node_results

  @staticmethod
  def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
                              node_results):
    """Compute global node data.

    @param node_results: the basic node structures as filled from the config

    """
    # make a copy of the current dict
    node_results = dict(node_results)
    for nname, nresult in node_data.items():
      assert nname in node_results, "Missing basic data for node %s" % nname
      ninfo = node_cfg[nname]

      if not (ninfo.offline or ninfo.drained):
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload

        for attr in ["memory_total", "memory_free", "memory_dom0",
                     "vg_size", "vg_free", "cpu_total"]:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]["memory"])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
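            # Added clarification (not in the original module): the node's
            # reported free memory is reduced by the gap between this
            # instance's configured BE_MEMORY and what it currently uses, so
            # the allocator plans as if each primary instance consumed its
            # full configured memory.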
            remote_info["memory_free"] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info["memory_total"],
          "reserved_memory": remote_info["memory_dom0"],
          "free_memory": remote_info["memory_free"],
          "total_disk": remote_info["vg_size"],
          "free_disk": remote_info["vg_free"],
          "total_cpus": remote_info["cpu_total"],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr_dyn.update(node_results[nname])
        node_results[nname] = pnr_dyn

    return node_results

  @staticmethod
  def _ComputeInstanceData(cluster_info, i_list):
    """Compute global instance data.

    """
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
        nic_dict = {
          "mac": nic.mac,
          "ip": nic.ip,
          "mode": filled_params[constants.NIC_MODE],
          "link": filled_params[constants.NIC_LINK],
          }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{constants.IDISK_SIZE: dsk.size,
                   constants.IDISK_MODE: dsk.mode}
                  for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    return instance_data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_INT_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1

    request = {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.memory,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      "hypervisor": self.hypervisor,
      }

    return request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_MIRRORED:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    if instance.disk_template in constants.DTS_INT_MIRROR and \
        len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node",
                                 errors.ECODE_STATE)

    self.required_nodes = 1
    disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    return request

  def _AddNodeEvacuate(self):
    """Get data for node-evacuate requests.

    """
    return {
      "instances": self.instances,
      "evac_mode": self.evac_mode,
      }

  def _AddChangeGroup(self):
    """Get data for group change requests.

    """
    return {
      "instances": self.instances,
      "target_groups": self.target_groups,
      }

  def _BuildInputData(self, fn, keydata):
    """Build input data structures.

    """
    self._ComputeClusterData()

    request = fn()
    request["type"] = self.mode
    for keyname, keytype in keydata:
      if keyname not in request:
        raise errors.ProgrammerError("Request parameter %s is missing" %
                                     keyname)
      val = request[keyname]
      if not keytype(val):
        raise errors.ProgrammerError("Request parameter %s doesn't pass"
                                     " validation, value %s, expected"
                                     " type %s" % (keyname, val, keytype))
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)

  _STRING_LIST = ht.TListOf(ht.TString)
  _JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
     # pylint: disable=E1101
     # Class '...' has no 'OP_ID' member
     "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
                          opcodes.OpInstanceMigrate.OP_ID,
                          opcodes.OpInstanceReplaceDisks.OP_ID])
     })))

  _NEVAC_MOVED = \
    ht.TListOf(ht.TAnd(ht.TIsLength(3),
                       ht.TItems([ht.TNonEmptyString,
                                  ht.TNonEmptyString,
                                  ht.TListOf(ht.TNonEmptyString),
                                 ])))
  _NEVAC_FAILED = \
    ht.TListOf(ht.TAnd(ht.TIsLength(2),
                       ht.TItems([ht.TNonEmptyString,
                                  ht.TMaybeString,
                                 ])))
  _NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
                          ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))

13171
    constants.IALLOCATOR_MODE_ALLOC:
13172
      (_AddNewInstance,
13173
       [
13174
        ("name", ht.TString),
13175
        ("memory", ht.TInt),
13176
        ("disks", ht.TListOf(ht.TDict)),
13177
        ("disk_template", ht.TString),
13178
        ("os", ht.TString),
13179
        ("tags", _STRING_LIST),
13180
        ("nics", ht.TListOf(ht.TDict)),
13181
        ("vcpus", ht.TInt),
13182
        ("hypervisor", ht.TString),
13183
        ], ht.TList),
13184
    constants.IALLOCATOR_MODE_RELOC:
13185
      (_AddRelocateInstance,
13186
       [("name", ht.TString), ("relocate_from", _STRING_LIST)],
13187
       ht.TList),
13188
     constants.IALLOCATOR_MODE_NODE_EVAC:
13189
      (_AddNodeEvacuate, [
13190
        ("instances", _STRING_LIST),
13191
        ("evac_mode", ht.TElemOf(constants.IALLOCATOR_NEVAC_MODES)),
13192
        ], _NEVAC_RESULT),
13193
     constants.IALLOCATOR_MODE_CHG_GROUP:
13194
      (_AddChangeGroup, [
13195
        ("instances", _STRING_LIST),
13196
        ("target_groups", _STRING_LIST),
13197
        ], _NEVAC_RESULT),
13198
    }
13199

    
13200
  def Run(self, name, validate=True, call_fn=None):
13201
    """Run an instance allocator and return the results.
13202

13203
    """
13204
    if call_fn is None:
13205
      call_fn = self.rpc.call_iallocator_runner
13206

    
13207
    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
13208
    result.Raise("Failure while running the iallocator script")
13209

    
13210
    self.out_text = result.payload
13211
    if validate:
13212
      self._ValidateResult()
13213

    
13214
  def _ValidateResult(self):
13215
    """Process the allocator results.
13216

13217
    This will process and if successful save the result in
13218
    self.out_data and the other parameters.
13219

13220
    """
13221
    try:
13222
      rdict = serializer.Load(self.out_text)
13223
    except Exception, err:
13224
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
13225

    
13226
    if not isinstance(rdict, dict):
13227
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
13228

    
13229
    # TODO: remove backwards compatiblity in later versions
13230
    if "nodes" in rdict and "result" not in rdict:
13231
      rdict["result"] = rdict["nodes"]
13232
      del rdict["nodes"]
13233

    
13234
    for key in "success", "info", "result":
13235
      if key not in rdict:
13236
        raise errors.OpExecError("Can't parse iallocator results:"
13237
                                 " missing key '%s'" % key)
13238
      setattr(self, key, rdict[key])
13239

    
13240
    if not self._result_check(self.result):
13241
      raise errors.OpExecError("Iallocator returned invalid result,"
13242
                               " expected %s, got %s" %
13243
                               (self._result_check, self.result),
13244
                               errors.ECODE_INVAL)
13245

    
13246
    if self.mode == constants.IALLOCATOR_MODE_RELOC:
13247
      assert self.relocate_from is not None
13248
      assert self.required_nodes == 1
13249

    
13250
      node2group = dict((name, ndata["group"])
13251
                        for (name, ndata) in self.in_data["nodes"].items())
13252

    
13253
      fn = compat.partial(self._NodesToGroups, node2group,
13254
                          self.in_data["nodegroups"])
13255

    
13256
      instance = self.cfg.GetInstanceInfo(self.name)
13257
      request_groups = fn(self.relocate_from + [instance.primary_node])
13258
      result_groups = fn(rdict["result"] + [instance.primary_node])
13259

    
13260
      if self.success and not set(result_groups).issubset(request_groups):
13261
        raise errors.OpExecError("Groups of nodes returned by iallocator (%s)"
13262
                                 " differ from original groups (%s)" %
13263
                                 (utils.CommaJoin(result_groups),
13264
                                  utils.CommaJoin(request_groups)))
13265

    
13266
    elif self.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
13267
      assert self.evac_mode in constants.IALLOCATOR_NEVAC_MODES
13268

    
13269
    self.out_data = rdict
13270

    
13271
  @staticmethod
13272
  def _NodesToGroups(node2group, groups, nodes):
13273
    """Returns a list of unique group names for a list of nodes.
13274

13275
    @type node2group: dict
13276
    @param node2group: Map from node name to group UUID
13277
    @type groups: dict
13278
    @param groups: Group information
13279
    @type nodes: list
13280
    @param nodes: Node names
13281

13282
    """
13283
    result = set()
13284

    
13285
    for node in nodes:
13286
      try:
13287
        group_uuid = node2group[node]
13288
      except KeyError:
13289
        # Ignore unknown node
13290
        pass
13291
      else:
13292
        try:
13293
          group = groups[group_uuid]
13294
        except KeyError:
13295
          # Can't find group, let's use UUID
13296
          group_name = group_uuid
13297
        else:
13298
          group_name = group["name"]
13299

    
13300
        result.add(group_name)
13301

    
13302
    return sorted(result)
13303
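
  # Added illustration (not part of the original module): given
  #   node2group = {"node1": "uuid-a", "node2": "uuid-b"}
  #   groups = {"uuid-a": {"name": "default"}}
  # _NodesToGroups(node2group, groups, ["node1", "node2", "unknown"]) returns
  # ["default", "uuid-b"]: unknown nodes are skipped and groups without an
  # entry fall back to their UUID.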

    
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode of
    the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["memory", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr, errors.ECODE_INVAL)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname, errors.ECODE_EXISTS)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'",
                                   errors.ECODE_INVAL)
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'",
                                   errors.ECODE_INVAL)
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            constants.IDISK_SIZE not in row or
            not isinstance(row[constants.IDISK_SIZE], int) or
            constants.IDISK_MODE not in row or
            row[constants.IDISK_MODE] not in constants.DISK_ACCESS_SET):
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
                                     " parameter", errors.ECODE_INVAL)
      if self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      fname = _ExpandInstanceName(self.cfg, self.op.name)
      self.op.name = fname
      self.relocate_from = \
          list(self.cfg.GetInstanceInfo(fname).secondary_nodes)
    elif self.op.mode in (constants.IALLOCATOR_MODE_CHG_GROUP,
                          constants.IALLOCATOR_MODE_NODE_EVAC):
      if not self.op.instances:
        raise errors.OpPrereqError("Missing instances", errors.ECODE_INVAL)
      self.op.instances = _GetWantedInstances(self, self.op.instances)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode, errors.ECODE_INVAL)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name",
                                   errors.ECODE_INVAL)
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction, errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       memory=self.op.memory,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_CHG_GROUP:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       instances=self.op.instances,
                       target_groups=self.op.target_groups)
    elif self.op.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       instances=self.op.instances,
                       evac_mode=self.op.evac_mode)
    else:
      raise errors.ProgrammerError("Unhandled mode %s in"
                                   " LUTestAllocator.Exec", self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result


#: Query type implementations
_QUERY_IMPL = {
  constants.QR_INSTANCE: _InstanceQuery,
  constants.QR_NODE: _NodeQuery,
  constants.QR_GROUP: _GroupQuery,
  constants.QR_OS: _OsQuery,
  }

assert set(_QUERY_IMPL.keys()) == constants.QR_VIA_OP


def _GetQueryImplementation(name):
  """Returns the implementation for a query type.

  @param name: Query type, must be one of L{constants.QR_VIA_OP}

  """
  try:
    return _QUERY_IMPL[name]
  except KeyError:
    raise errors.OpPrereqError("Unknown query resource '%s'" % name,
                               errors.ECODE_INVAL)
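
# Added illustration (not part of the original module): callers look up the
# query backend by resource type, e.g. _GetQueryImplementation(constants.QR_GROUP)
# returns the _GroupQuery class used by LUGroupQuery above; unknown resource
# names raise OpPrereqError.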