lib/cmdlib.py @ 55011921
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0201,C0302

# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions

# C0302: since we have waaaay too many lines in this module

import os
import os.path
import time
import re
import platform
import logging
import copy
import OpenSSL
import socket
import tempfile
import shutil
import itertools

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf
from ganeti import uidpool
from ganeti import compat
from ganeti import masterd
from ganeti import netutils
from ganeti import query
from ganeti import qlang
from ganeti import opcodes

import ganeti.masterd.instance # pylint: disable-msg=W0611


def _SupportsOob(cfg, node):
  """Tells if node supports OOB.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @return: The OOB script if supported or an empty string otherwise

  """
  return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]


class ResultWithJobs:
  """Data container for LU results with jobs.

  Instances of this class returned from L{LogicalUnit.Exec} will be recognized
  by L{mcpu.Processor._ProcessResult}. The latter will then submit the jobs
  contained in the C{jobs} attribute and include the job IDs in the opcode
  result.

  """
  def __init__(self, jobs, **kwargs):
    """Initializes this class.

    Additional return values can be specified as keyword arguments.

    @type jobs: list of lists of L{opcodes.OpCode}
    @param jobs: A list of lists of opcode objects

    """
    self.jobs = jobs
    self.other = kwargs
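
  # Illustrative usage (comment only, not executed): an LU's Exec method can
  # hand follow-up work to the job queue by returning a ResultWithJobs; the
  # opcode chosen and the "affected_instances" variable below are hypothetical.
  #
  #   return ResultWithJobs([[opcodes.OpInstanceStartup(instance_name=name)]
  #                          for name in affected_instances],
  #                         other_field="value stored in the 'other' dict")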


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - implement BuildHooksNodes
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.glm = context.glm
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # logging
    self.Log = processor.Log # pylint: disable-msg=C0103
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    # Tasklets
    self.tasklets = None

    # Validate opcode parameters and set defaults
    self.op.Validate(True)

    self.CheckArguments()

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.com'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level.

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()
    else:
      pass

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    @rtype: dict
    @return: Dictionary containing the environment that will be used for
      running the hooks for this LU. The keys of the dict must not be prefixed
      with "GANETI_"--that'll be added by the hooks runner. The hooks runner
      will extend the environment with additional variables. If no environment
      should be defined, an empty dictionary should be returned (not C{None}).
    @note: If the C{HPATH} attribute of the LU class is C{None}, this function
      will not be called.

    """
    raise NotImplementedError

  def BuildHooksNodes(self):
    """Build list of nodes to run LU's hooks.

    @rtype: tuple; (list, list)
    @return: Tuple containing a list of node names on which the hook
      should run before the execution and a list of node names on which the
      hook should run after the execution. No nodes should be returned as an
      empty list (and not None).
    @note: If the C{HPATH} attribute of the LU class is C{None}, this function
      will not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # API must be kept, thus we ignore the unused argument and the
    # "could be a function" warnings
    # pylint: disable-msg=W0613,R0201
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.glm.list_owned(locking.LEVEL_INSTANCE):
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]
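
  # Illustrative sketch (comment only, not executed): an LU that locks one
  # instance and then needs that instance's nodes would typically combine the
  # helpers above like this, using constants.LOCKS_REPLACE from this module:
  #
  #   def ExpandNames(self):
  #     self._ExpandAndLockInstance()
  #     self.needed_locks[locking.LEVEL_NODE] = []
  #     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
  #
  #   def DeclareLocks(self, level):
  #     if level == locking.LEVEL_NODE:
  #       self._LockInstancesNodes()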


class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Empty BuildHooksEnv for NoHooksLU.

    This just raises an error.

    """
    raise AssertionError("BuildHooksEnv called for NoHooksLUs")

  def BuildHooksNodes(self):
    """Empty BuildHooksNodes for NoHooksLU.

    """
    raise AssertionError("BuildHooksNodes called for NoHooksLU")


class Tasklet:
  """Tasklet base class.

  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
  tasklets know nothing about locks.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec

  """
  def __init__(self, lu):
    self.lu = lu

    # Shortcuts
    self.cfg = lu.cfg
    self.rpc = lu.rpc

  def CheckPrereq(self):
    """Check prerequisites for this tasklet.

    This method should check whether the prerequisites for the execution of
    this tasklet are fulfilled. It can do internode communication, but it
    should be idempotent - no cluster or system changes are allowed.

    The method should raise errors.OpPrereqError in case something is not
    fulfilled. Its return value is ignored.

    This method should also update all parameters to their canonical form if it
    hasn't been done before.

    """
    pass

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in code, or
    expected.

    """
    raise NotImplementedError


class _QueryBase:
  """Base for query utility classes.

  """
  #: Attribute holding field definitions
  FIELDS = None

  def __init__(self, filter_, fields, use_locking):
    """Initializes this class.

    """
    self.use_locking = use_locking

    self.query = query.Query(self.FIELDS, fields, filter_=filter_,
                             namefield="name")
    self.requested_data = self.query.RequestedData()
    self.names = self.query.RequestedNames()

    # Sort only if no names were requested
    self.sort_by_name = not self.names

    self.do_locking = None
    self.wanted = None

  def _GetNames(self, lu, all_names, lock_level):
    """Helper function to determine names asked for in the query.

    """
    if self.do_locking:
      names = lu.glm.list_owned(lock_level)
    else:
      names = all_names

    if self.wanted == locking.ALL_SET:
      assert not self.names
      # caller didn't specify names, so ordering is not important
      return utils.NiceSort(names)

    # caller specified names and we must keep the same order
    assert self.names
    assert not self.do_locking or lu.glm.is_owned(lock_level)

    missing = set(self.wanted).difference(names)
    if missing:
      raise errors.OpExecError("Some items were removed before retrieving"
                               " their data: %s" % missing)

    # Return expanded names
    return self.wanted

  def ExpandNames(self, lu):
    """Expand names for this query.

    See L{LogicalUnit.ExpandNames}.

    """
    raise NotImplementedError()

  def DeclareLocks(self, lu, level):
    """Declare locks for this query.

    See L{LogicalUnit.DeclareLocks}.

    """
    raise NotImplementedError()

  def _GetQueryData(self, lu):
    """Collects all data for this query.

    @return: Query data object

    """
    raise NotImplementedError()

  def NewStyleQuery(self, lu):
    """Collect data and execute query.

    """
    return query.GetQueryResponse(self.query, self._GetQueryData(lu),
                                  sort_by_name=self.sort_by_name)

  def OldStyleQuery(self, lu):
    """Collect data and execute query.

    """
    return self.query.OldStyleQuery(self._GetQueryData(lu),
                                    sort_by_name=self.sort_by_name)

    
561

    
562
def _GetWantedNodes(lu, nodes):
563
  """Returns list of checked and expanded node names.
564

565
  @type lu: L{LogicalUnit}
566
  @param lu: the logical unit on whose behalf we execute
567
  @type nodes: list
568
  @param nodes: list of node names or None for all nodes
569
  @rtype: list
570
  @return: the list of nodes, sorted
571
  @raise errors.ProgrammerError: if the nodes parameter is wrong type
572

573
  """
574
  if nodes:
575
    return [_ExpandNodeName(lu.cfg, name) for name in nodes]
576

    
577
  return utils.NiceSort(lu.cfg.GetNodeList())
578

    
579

    
580
def _GetWantedInstances(lu, instances):
581
  """Returns list of checked and expanded instance names.
582

583
  @type lu: L{LogicalUnit}
584
  @param lu: the logical unit on whose behalf we execute
585
  @type instances: list
586
  @param instances: list of instance names or None for all instances
587
  @rtype: list
588
  @return: the list of instances, sorted
589
  @raise errors.OpPrereqError: if the instances parameter is wrong type
590
  @raise errors.OpPrereqError: if any of the passed instances is not found
591

592
  """
593
  if instances:
594
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
595
  else:
596
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
597
  return wanted
598

    
599

    
600
def _GetUpdatedParams(old_params, update_dict,
601
                      use_default=True, use_none=False):
602
  """Return the new version of a parameter dictionary.
603

604
  @type old_params: dict
605
  @param old_params: old parameters
606
  @type update_dict: dict
607
  @param update_dict: dict containing new parameter values, or
608
      constants.VALUE_DEFAULT to reset the parameter to its default
609
      value
610
  @param use_default: boolean
611
  @type use_default: whether to recognise L{constants.VALUE_DEFAULT}
612
      values as 'to be deleted' values
613
  @param use_none: boolean
614
  @type use_none: whether to recognise C{None} values as 'to be
615
      deleted' values
616
  @rtype: dict
617
  @return: the new parameter dictionary
618

619
  """
620
  params_copy = copy.deepcopy(old_params)
621
  for key, val in update_dict.iteritems():
622
    if ((use_default and val == constants.VALUE_DEFAULT) or
623
        (use_none and val is None)):
624
      try:
625
        del params_copy[key]
626
      except KeyError:
627
        pass
628
    else:
629
      params_copy[key] = val
630
  return params_copy
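
# Doctest-style example (illustrative values): entries set to VALUE_DEFAULT
# are dropped from the result (falling back to the defaults kept elsewhere),
# other entries overwrite, and old_params itself is never modified.
#
#   >>> _GetUpdatedParams({"memory": 128, "vcpus": 1},
#   ...                   {"memory": constants.VALUE_DEFAULT, "vcpus": 4})
#   {'vcpus': 4}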


def _ReleaseLocks(lu, level, names=None, keep=None):
  """Releases locks owned by an LU.

  @type lu: L{LogicalUnit}
  @param level: Lock level
  @type names: list or None
  @param names: Names of locks to release
  @type keep: list or None
  @param keep: Names of locks to retain

  """
  assert not (keep is not None and names is not None), \
         "Only one of the 'names' and the 'keep' parameters can be given"

  if names is not None:
    should_release = names.__contains__
  elif keep:
    should_release = lambda name: name not in keep
  else:
    should_release = None

  if should_release:
    retain = []
    release = []

    # Determine which locks to release
    for name in lu.glm.list_owned(level):
      if should_release(name):
        release.append(name)
      else:
        retain.append(name)

    assert len(lu.glm.list_owned(level)) == (len(retain) + len(release))

    # Release just some locks
    lu.glm.release(level, names=release)

    assert frozenset(lu.glm.list_owned(level)) == frozenset(retain)
  else:
    # Release everything
    lu.glm.release(level)

    assert not lu.glm.is_owned(level), "No locks should be owned"

    
677

    
678
def _RunPostHook(lu, node_name):
679
  """Runs the post-hook for an opcode on a single node.
680

681
  """
682
  hm = lu.proc.hmclass(lu.rpc.call_hooks_runner, lu)
683
  try:
684
    hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
685
  except:
686
    # pylint: disable-msg=W0702
687
    lu.LogWarning("Errors occurred running hooks on %s" % node_name)
688

    
689

    
690
def _CheckOutputFields(static, dynamic, selected):
691
  """Checks whether all selected fields are valid.
692

693
  @type static: L{utils.FieldSet}
694
  @param static: static fields set
695
  @type dynamic: L{utils.FieldSet}
696
  @param dynamic: dynamic fields set
697

698
  """
699
  f = utils.FieldSet()
700
  f.Extend(static)
701
  f.Extend(dynamic)
702

    
703
  delta = f.NonMatching(selected)
704
  if delta:
705
    raise errors.OpPrereqError("Unknown output fields selected: %s"
706
                               % ",".join(delta), errors.ECODE_INVAL)
707

    
708

    
709
def _CheckGlobalHvParams(params):
710
  """Validates that given hypervisor params are not global ones.
711

712
  This will ensure that instances don't get customised versions of
713
  global params.
714

715
  """
716
  used_globals = constants.HVC_GLOBALS.intersection(params)
717
  if used_globals:
718
    msg = ("The following hypervisor parameters are global and cannot"
719
           " be customized at instance level, please modify them at"
720
           " cluster level: %s" % utils.CommaJoin(used_globals))
721
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
722

    
723

    
724
def _CheckNodeOnline(lu, node, msg=None):
725
  """Ensure that a given node is online.
726

727
  @param lu: the LU on behalf of which we make the check
728
  @param node: the node to check
729
  @param msg: if passed, should be a message to replace the default one
730
  @raise errors.OpPrereqError: if the node is offline
731

732
  """
733
  if msg is None:
734
    msg = "Can't use offline node"
735
  if lu.cfg.GetNodeInfo(node).offline:
736
    raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)
737

    
738

    
739
def _CheckNodeNotDrained(lu, node):
740
  """Ensure that a given node is not drained.
741

742
  @param lu: the LU on behalf of which we make the check
743
  @param node: the node to check
744
  @raise errors.OpPrereqError: if the node is drained
745

746
  """
747
  if lu.cfg.GetNodeInfo(node).drained:
748
    raise errors.OpPrereqError("Can't use drained node %s" % node,
749
                               errors.ECODE_STATE)
750

    
751

    
752
def _CheckNodeVmCapable(lu, node):
753
  """Ensure that a given node is vm capable.
754

755
  @param lu: the LU on behalf of which we make the check
756
  @param node: the node to check
757
  @raise errors.OpPrereqError: if the node is not vm capable
758

759
  """
760
  if not lu.cfg.GetNodeInfo(node).vm_capable:
761
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
762
                               errors.ECODE_STATE)
763

    
764

    
765
def _CheckNodeHasOS(lu, node, os_name, force_variant):
766
  """Ensure that a node supports a given OS.
767

768
  @param lu: the LU on behalf of which we make the check
769
  @param node: the node to check
770
  @param os_name: the OS to query about
771
  @param force_variant: whether to ignore variant errors
772
  @raise errors.OpPrereqError: if the node is not supporting the OS
773

774
  """
775
  result = lu.rpc.call_os_get(node, os_name)
776
  result.Raise("OS '%s' not in supported OS list for node %s" %
777
               (os_name, node),
778
               prereq=True, ecode=errors.ECODE_INVAL)
779
  if not force_variant:
780
    _CheckOSVariant(result.payload, os_name)
781

    
782

    
783
def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
784
  """Ensure that a node has the given secondary ip.
785

786
  @type lu: L{LogicalUnit}
787
  @param lu: the LU on behalf of which we make the check
788
  @type node: string
789
  @param node: the node to check
790
  @type secondary_ip: string
791
  @param secondary_ip: the ip to check
792
  @type prereq: boolean
793
  @param prereq: whether to throw a prerequisite or an execute error
794
  @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
795
  @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False
796

797
  """
798
  result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
799
  result.Raise("Failure checking secondary ip on node %s" % node,
800
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
801
  if not result.payload:
802
    msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
803
           " please fix and re-run this command" % secondary_ip)
804
    if prereq:
805
      raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
806
    else:
807
      raise errors.OpExecError(msg)
808

    
809

    
810
def _GetClusterDomainSecret():
811
  """Reads the cluster domain secret.
812

813
  """
814
  return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
815
                               strict=True)
816

    
817

    
818
def _CheckInstanceDown(lu, instance, reason):
819
  """Ensure that an instance is not running."""
820
  if instance.admin_up:
821
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
822
                               (instance.name, reason), errors.ECODE_STATE)
823

    
824
  pnode = instance.primary_node
825
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
826
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
827
              prereq=True, ecode=errors.ECODE_ENVIRON)
828

    
829
  if instance.name in ins_l.payload:
830
    raise errors.OpPrereqError("Instance %s is running, %s" %
831
                               (instance.name, reason), errors.ECODE_STATE)
832

    
833

    
834
def _ExpandItemName(fn, name, kind):
835
  """Expand an item name.
836

837
  @param fn: the function to use for expansion
838
  @param name: requested item name
839
  @param kind: text description ('Node' or 'Instance')
840
  @return: the resolved (full) name
841
  @raise errors.OpPrereqError: if the item is not found
842

843
  """
844
  full_name = fn(name)
845
  if full_name is None:
846
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
847
                               errors.ECODE_NOENT)
848
  return full_name
849

    
850

    
851
def _ExpandNodeName(cfg, name):
852
  """Wrapper over L{_ExpandItemName} for nodes."""
853
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
854

    
855

    
856
def _ExpandInstanceName(cfg, name):
857
  """Wrapper over L{_ExpandItemName} for instance."""
858
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
859

    
860

    
861
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
862
                          memory, vcpus, nics, disk_template, disks,
863
                          bep, hvp, hypervisor_name):
864
  """Builds instance related env variables for hooks
865

866
  This builds the hook environment from individual variables.
867

868
  @type name: string
869
  @param name: the name of the instance
870
  @type primary_node: string
871
  @param primary_node: the name of the instance's primary node
872
  @type secondary_nodes: list
873
  @param secondary_nodes: list of secondary nodes as strings
874
  @type os_type: string
875
  @param os_type: the name of the instance's OS
876
  @type status: boolean
877
  @param status: the should_run status of the instance
878
  @type memory: string
879
  @param memory: the memory size of the instance
880
  @type vcpus: string
881
  @param vcpus: the count of VCPUs the instance has
882
  @type nics: list
883
  @param nics: list of tuples (ip, mac, mode, link) representing
884
      the NICs the instance has
885
  @type disk_template: string
886
  @param disk_template: the disk template of the instance
887
  @type disks: list
888
  @param disks: the list of (size, mode) pairs
889
  @type bep: dict
890
  @param bep: the backend parameters for the instance
891
  @type hvp: dict
892
  @param hvp: the hypervisor parameters for the instance
893
  @type hypervisor_name: string
894
  @param hypervisor_name: the hypervisor for the instance
895
  @rtype: dict
896
  @return: the hook environment for this instance
897

898
  """
899
  if status:
900
    str_status = "up"
901
  else:
902
    str_status = "down"
903
  env = {
904
    "OP_TARGET": name,
905
    "INSTANCE_NAME": name,
906
    "INSTANCE_PRIMARY": primary_node,
907
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
908
    "INSTANCE_OS_TYPE": os_type,
909
    "INSTANCE_STATUS": str_status,
910
    "INSTANCE_MEMORY": memory,
911
    "INSTANCE_VCPUS": vcpus,
912
    "INSTANCE_DISK_TEMPLATE": disk_template,
913
    "INSTANCE_HYPERVISOR": hypervisor_name,
914
  }
915

    
916
  if nics:
917
    nic_count = len(nics)
918
    for idx, (ip, mac, mode, link) in enumerate(nics):
919
      if ip is None:
920
        ip = ""
921
      env["INSTANCE_NIC%d_IP" % idx] = ip
922
      env["INSTANCE_NIC%d_MAC" % idx] = mac
923
      env["INSTANCE_NIC%d_MODE" % idx] = mode
924
      env["INSTANCE_NIC%d_LINK" % idx] = link
925
      if mode == constants.NIC_MODE_BRIDGED:
926
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
927
  else:
928
    nic_count = 0
929

    
930
  env["INSTANCE_NIC_COUNT"] = nic_count
931

    
932
  if disks:
933
    disk_count = len(disks)
934
    for idx, (size, mode) in enumerate(disks):
935
      env["INSTANCE_DISK%d_SIZE" % idx] = size
936
      env["INSTANCE_DISK%d_MODE" % idx] = mode
937
  else:
938
    disk_count = 0
939

    
940
  env["INSTANCE_DISK_COUNT"] = disk_count
941

    
942
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
943
    for key, value in source.items():
944
      env["INSTANCE_%s_%s" % (kind, key)] = value
945

    
946
  return env
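
# Example of the resulting environment (illustrative values) for an instance
# with one bridged NIC and one disk; the hooks runner later prefixes each key
# with "GANETI_":
#
#   OP_TARGET=inst1.example.com
#   INSTANCE_PRIMARY=node1.example.com
#   INSTANCE_STATUS=up
#   INSTANCE_NIC_COUNT=1
#   INSTANCE_NIC0_MODE=bridged
#   INSTANCE_NIC0_BRIDGE=xen-br0
#   INSTANCE_DISK_COUNT=1
#   INSTANCE_DISK0_SIZE=10240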


def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUInstanceQueryData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  cluster = lu.cfg.GetClusterInfo()
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = cluster.SimpleFillNIC(nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': bep,
    'hvp': hvp,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max with one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should
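
# Worked example (illustrative numbers): with candidate_pool_size = 10 and
# GetMasterCandidateStats reporting mc_now = 3, mc_should = 3, the node being
# added raises the target to min(3 + 1, 10) = 4, so 3 < 4 and the function
# returns True (the node should promote itself).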


def _CheckNicsBridgesExist(lu, target_nics, target_node):
  """Check that the bridges needed by a list of nics exist.

  """
  cluster = lu.cfg.GetClusterInfo()
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  if not os_obj.supported_variants:
    return
  variant = objects.OS.GetVariant(name)
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """

  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]

  return []


def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
  """Check the sanity of iallocator and node arguments and use the
  cluster-wide iallocator if appropriate.

  Check that at most one of (iallocator, node) is specified. If none is
  specified, then the LU's opcode's iallocator slot is filled with the
  cluster-wide default iallocator.

  @type iallocator_slot: string
  @param iallocator_slot: the name of the opcode iallocator slot
  @type node_slot: string
  @param node_slot: the name of the opcode target node slot

  """
  node = getattr(lu.op, node_slot, None)
  iallocator = getattr(lu.op, iallocator_slot, None)

  if node is not None and iallocator is not None:
    raise errors.OpPrereqError("Do not specify both, iallocator and node.",
                               errors.ECODE_INVAL)
  elif node is None and iallocator is None:
    default_iallocator = lu.cfg.GetDefaultIAllocator()
    if default_iallocator:
      setattr(lu.op, iallocator_slot, default_iallocator)
    else:
      raise errors.OpPrereqError("No iallocator or node given and no"
                                 " cluster-wide default iallocator found."
                                 " Please specify either an iallocator or a"
                                 " node, or set a cluster-wide default"
                                 " iallocator.")
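
# Illustrative usage (comment only, not executed): _CheckIAllocatorOrNode is
# meant to be called from an LU's CheckArguments with the opcode slot names,
# e.g. something along the lines of:
#
#   def CheckArguments(self):
#     _CheckIAllocatorOrNode(self, "iallocator", "pnode")

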
class LUClusterPostInit(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([], [self.cfg.GetMasterNode()])

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True


class LUClusterDestroy(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([], [])

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1),
                                 errors.ECODE_INVAL)
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()

    # Run post hooks on master node before it's removed
    _RunPostHook(self, master)

    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    return master


def _VerifyCertificate(filename):
  """Verifies a certificate for LUClusterVerify.

  @type filename: string
  @param filename: Path to PEM file

  """
  try:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           utils.ReadFile(filename))
  except Exception, err: # pylint: disable-msg=W0703
    return (LUClusterVerify.ETYPE_ERROR,
            "Failed to load X509 certificate %s: %s" % (filename, err))

  (errcode, msg) = \
    utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
                                constants.SSL_CERT_EXPIRATION_ERROR)

  if msg:
    fnamemsg = "While verifying %s: %s" % (filename, msg)
  else:
    fnamemsg = None

  if errcode is None:
    return (None, fnamemsg)
  elif errcode == utils.CERT_WARNING:
    return (LUClusterVerify.ETYPE_WARNING, fnamemsg)
  elif errcode == utils.CERT_ERROR:
    return (LUClusterVerify.ETYPE_ERROR, fnamemsg)

  raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
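
# Example return values (illustrative): (None, None) for a healthy
# certificate; (LUClusterVerify.ETYPE_WARNING, "While verifying <file>: <msg>")
# when utils.VerifyX509Certificate reports an expiry warning; and
# (LUClusterVerify.ETYPE_ERROR, ...) when the file cannot be loaded or the
# certificate has already expired.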
1286

    
1287

    
1288
class LUClusterVerify(LogicalUnit):
1289
  """Verifies the cluster status.
1290

1291
  """
1292
  HPATH = "cluster-verify"
1293
  HTYPE = constants.HTYPE_CLUSTER
1294
  REQ_BGL = False
1295

    
1296
  TCLUSTER = "cluster"
1297
  TNODE = "node"
1298
  TINSTANCE = "instance"
1299

    
1300
  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
1301
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
1302
  ECLUSTERFILECHECK = (TCLUSTER, "ECLUSTERFILECHECK")
1303
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
1304
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
1305
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
1306
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
1307
  EINSTANCEFAULTYDISK = (TINSTANCE, "EINSTANCEFAULTYDISK")
1308
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
1309
  EINSTANCESPLITGROUPS = (TINSTANCE, "EINSTANCESPLITGROUPS")
1310
  ENODEDRBD = (TNODE, "ENODEDRBD")
1311
  ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
1312
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
1313
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
1314
  ENODEHV = (TNODE, "ENODEHV")
1315
  ENODELVM = (TNODE, "ENODELVM")
1316
  ENODEN1 = (TNODE, "ENODEN1")
1317
  ENODENET = (TNODE, "ENODENET")
1318
  ENODEOS = (TNODE, "ENODEOS")
1319
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
1320
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
1321
  ENODERPC = (TNODE, "ENODERPC")
1322
  ENODESSH = (TNODE, "ENODESSH")
1323
  ENODEVERSION = (TNODE, "ENODEVERSION")
1324
  ENODESETUP = (TNODE, "ENODESETUP")
1325
  ENODETIME = (TNODE, "ENODETIME")
1326
  ENODEOOBPATH = (TNODE, "ENODEOOBPATH")
1327

    
1328
  ETYPE_FIELD = "code"
1329
  ETYPE_ERROR = "ERROR"
1330
  ETYPE_WARNING = "WARNING"
1331

    
1332
  _HOOKS_INDENT_RE = re.compile("^", re.M)
1333

    
1334
  class NodeImage(object):
1335
    """A class representing the logical and physical status of a node.
1336

1337
    @type name: string
1338
    @ivar name: the node name to which this object refers
1339
    @ivar volumes: a structure as returned from
1340
        L{ganeti.backend.GetVolumeList} (runtime)
1341
    @ivar instances: a list of running instances (runtime)
1342
    @ivar pinst: list of configured primary instances (config)
1343
    @ivar sinst: list of configured secondary instances (config)
1344
    @ivar sbp: dictionary of {primary-node: list of instances} for all
1345
        instances for which this node is secondary (config)
1346
    @ivar mfree: free memory, as reported by hypervisor (runtime)
1347
    @ivar dfree: free disk, as reported by the node (runtime)
1348
    @ivar offline: the offline status (config)
1349
    @type rpc_fail: boolean
1350
    @ivar rpc_fail: whether the RPC verify call was successfull (overall,
1351
        not whether the individual keys were correct) (runtime)
1352
    @type lvm_fail: boolean
1353
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
1354
    @type hyp_fail: boolean
1355
    @ivar hyp_fail: whether the RPC call didn't return the instance list
1356
    @type ghost: boolean
1357
    @ivar ghost: whether this is a known node or not (config)
1358
    @type os_fail: boolean
1359
    @ivar os_fail: whether the RPC call didn't return valid OS data
1360
    @type oslist: list
1361
    @ivar oslist: list of OSes as diagnosed by DiagnoseOS
1362
    @type vm_capable: boolean
1363
    @ivar vm_capable: whether the node can host instances
1364

1365
    """
1366
    def __init__(self, offline=False, name=None, vm_capable=True):
1367
      self.name = name
1368
      self.volumes = {}
1369
      self.instances = []
1370
      self.pinst = []
1371
      self.sinst = []
1372
      self.sbp = {}
1373
      self.mfree = 0
1374
      self.dfree = 0
1375
      self.offline = offline
1376
      self.vm_capable = vm_capable
1377
      self.rpc_fail = False
1378
      self.lvm_fail = False
1379
      self.hyp_fail = False
1380
      self.ghost = False
1381
      self.os_fail = False
1382
      self.oslist = {}
1383

    
1384
  def ExpandNames(self):
1385
    self.needed_locks = {
1386
      locking.LEVEL_NODE: locking.ALL_SET,
1387
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1388
    }
1389
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1390

    
1391
  def _Error(self, ecode, item, msg, *args, **kwargs):
1392
    """Format an error message.
1393

1394
    Based on the opcode's error_codes parameter, either format a
1395
    parseable error code, or a simpler error string.
1396

1397
    This must be called only from Exec and functions called from Exec.
1398

1399
    """
1400
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1401
    itype, etxt = ecode
1402
    # first complete the msg
1403
    if args:
1404
      msg = msg % args
1405
    # then format the whole message
1406
    if self.op.error_codes:
1407
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1408
    else:
1409
      if item:
1410
        item = " " + item
1411
      else:
1412
        item = ""
1413
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1414
    # and finally report it via the feedback_fn
1415
    self._feedback_fn("  - %s" % msg)
1416

    
1417
  def _ErrorIf(self, cond, *args, **kwargs):
1418
    """Log an error message if the passed condition is True.
1419

1420
    """
1421
    cond = bool(cond) or self.op.debug_simulate_errors
1422
    if cond:
1423
      self._Error(*args, **kwargs)
1424
    # do not mark the operation as failed for WARN cases only
1425
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1426
      self.bad = self.bad or cond
1427

    
1428
  def _VerifyNode(self, ninfo, nresult):
1429
    """Perform some basic validation on data returned from a node.
1430

1431
      - check the result data structure is well formed and has all the
1432
        mandatory fields
1433
      - check ganeti version
1434

1435
    @type ninfo: L{objects.Node}
1436
    @param ninfo: the node to check
1437
    @param nresult: the results from the node
1438
    @rtype: boolean
1439
    @return: whether overall this call was successful (and we can expect
1440
         reasonable values in the respose)
1441

1442
    """
1443
    node = ninfo.name
1444
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1445

    
1446
    # main result, nresult should be a non-empty dict
1447
    test = not nresult or not isinstance(nresult, dict)
1448
    _ErrorIf(test, self.ENODERPC, node,
1449
                  "unable to verify node: no data returned")
1450
    if test:
1451
      return False
1452

    
1453
    # compares ganeti version
1454
    local_version = constants.PROTOCOL_VERSION
1455
    remote_version = nresult.get("version", None)
1456
    test = not (remote_version and
1457
                isinstance(remote_version, (list, tuple)) and
1458
                len(remote_version) == 2)
1459
    _ErrorIf(test, self.ENODERPC, node,
1460
             "connection to node returned invalid data")
1461
    if test:
1462
      return False
1463

    
1464
    test = local_version != remote_version[0]
1465
    _ErrorIf(test, self.ENODEVERSION, node,
1466
             "incompatible protocol versions: master %s,"
1467
             " node %s", local_version, remote_version[0])
1468
    if test:
1469
      return False
1470

    
1471
    # node seems compatible, we can actually try to look into its results
1472

    
1473
    # full package version
1474
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1475
                  self.ENODEVERSION, node,
1476
                  "software version mismatch: master %s, node %s",
1477
                  constants.RELEASE_VERSION, remote_version[1],
1478
                  code=self.ETYPE_WARNING)
1479

    
1480
    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1481
    if ninfo.vm_capable and isinstance(hyp_result, dict):
1482
      for hv_name, hv_result in hyp_result.iteritems():
1483
        test = hv_result is not None
1484
        _ErrorIf(test, self.ENODEHV, node,
1485
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1486

    
1487
    hvp_result = nresult.get(constants.NV_HVPARAMS, None)
1488
    if ninfo.vm_capable and isinstance(hvp_result, list):
1489
      for item, hv_name, hv_result in hvp_result:
1490
        _ErrorIf(True, self.ENODEHV, node,
1491
                 "hypervisor %s parameter verify failure (source %s): %s",
1492
                 hv_name, item, hv_result)
1493

    
1494
    test = nresult.get(constants.NV_NODESETUP,
1495
                       ["Missing NODESETUP results"])
1496
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1497
             "; ".join(test))
1498

    
1499
    return True
1500

    
1501
  def _VerifyNodeTime(self, ninfo, nresult,
                      nvinfo_starttime, nvinfo_endtime):
    """Check the node time.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nvinfo_starttime: the start time of the RPC call
    @param nvinfo_endtime: the end time of the RPC call

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    ntime = nresult.get(constants.NV_TIME, None)
    try:
      ntime_merged = utils.MergeTime(ntime)
    except (ValueError, TypeError):
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
      return

    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
    else:
      ntime_diff = None

    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
             "Node time diverges by at least %s from master node time",
             ntime_diff)

  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
    """Check the node LVM results.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param vg_name: the configured VG name

    """
    if vg_name is None:
      return

    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # checks vg existence and size > 20G
    vglist = nresult.get(constants.NV_VGLIST, None)
    test = not vglist
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
    if not test:
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                            constants.MIN_VG_SIZE)
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)

    # check pv names
    pvlist = nresult.get(constants.NV_PVLIST, None)
    test = pvlist is None
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
    if not test:
      # check that ':' is not present in PV names, since it's a
      # special character for lvcreate (denotes the range of PEs to
      # use on the PV)
      for _, pvname, owner_vg in pvlist:
        test = ":" in pvname
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
                 " '%s' of VG '%s'", pvname, owner_vg)

  def _VerifyNodeBridges(self, ninfo, nresult, bridges):
    """Check the node bridges.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param bridges: the expected list of bridges

    """
    if not bridges:
      return

    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    missing = nresult.get(constants.NV_BRIDGES, None)
    test = not isinstance(missing, list)
    _ErrorIf(test, self.ENODENET, node,
             "did not return valid bridge information")
    if not test:
      _ErrorIf(bool(missing), self.ENODENET, node, "missing bridges: %s" %
               utils.CommaJoin(sorted(missing)))

  def _VerifyNodeNetwork(self, ninfo, nresult):
    """Check the node network connectivity results.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    test = constants.NV_NODELIST not in nresult
    _ErrorIf(test, self.ENODESSH, node,
             "node hasn't returned node ssh connectivity data")
    if not test:
      if nresult[constants.NV_NODELIST]:
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
          _ErrorIf(True, self.ENODESSH, node,
                   "ssh communication with node '%s': %s", a_node, a_msg)

    test = constants.NV_NODENETTEST not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node tcp connectivity data")
    if not test:
      if nresult[constants.NV_NODENETTEST]:
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
        for anode in nlist:
          _ErrorIf(True, self.ENODENET, node,
                   "tcp communication with node '%s': %s",
                   anode, nresult[constants.NV_NODENETTEST][anode])

    test = constants.NV_MASTERIP not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node master IP reachability data")
    if not test:
      if not nresult[constants.NV_MASTERIP]:
        if node == self.master_node:
          msg = "the master node cannot reach the master IP (not configured?)"
        else:
          msg = "cannot reach the master IP"
        _ErrorIf(True, self.ENODENET, node, msg)

  def _VerifyInstance(self, instance, instanceconfig, node_image,
                      diskstatus):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      n_img = node_image[node]
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # ignore missing volumes on offline or broken nodes
        continue
      for volume in node_vol_should[node]:
        test = volume not in n_img.volumes
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
                 "volume %s missing on node %s", volume, node)

    if instanceconfig.admin_up:
      pri_img = node_image[node_current]
      test = instance not in pri_img.instances and not pri_img.offline
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
               "instance not running on its primary node %s",
               node_current)

    for node, n_img in node_image.items():
      if node != node_current:
        test = instance in n_img.instances
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
                 "instance should not run on node %s", node)

    diskdata = [(nname, success, status, idx)
                for (nname, disks) in diskstatus.items()
                for idx, (success, status) in enumerate(disks)]

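    # each diskdata entry is (node name, success, bdev_status, disk index),
    # flattened from the per-node status lists gathered in diskstatus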
    for nname, success, bdev_status, idx in diskdata:
      # the 'ghost node' construction in Exec() ensures that we have a
      # node here
      snode = node_image[nname]
      bad_snode = snode.ghost or snode.offline
      _ErrorIf(instanceconfig.admin_up and not success and not bad_snode,
               self.EINSTANCEFAULTYDISK, instance,
               "couldn't retrieve status for disk/%s on %s: %s",
               idx, nname, bdev_status)
      _ErrorIf((instanceconfig.admin_up and success and
                bdev_status.ldisk_status == constants.LDS_FAULTY),
               self.EINSTANCEFAULTYDISK, instance,
               "disk/%s on %s is faulty", idx, nname)

  def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    @type reserved: L{ganeti.utils.FieldSet}
    @param reserved: a FieldSet of reserved volume names

    """
    for node, n_img in node_image.items():
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # skip non-healthy nodes
        continue
      for volume in n_img.volumes:
        test = ((node not in node_vol_should or
                volume not in node_vol_should[node]) and
                not reserved.Matches(volume))
        self._ErrorIf(test, self.ENODEORPHANLV, node,
                      "volume %s is unknown", volume)

  def _VerifyOrphanInstances(self, instancelist, node_image):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    for node, n_img in node_image.items():
      for o_inst in n_img.instances:
        test = o_inst not in instancelist
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
                      "instance %s on node %s should not exist", o_inst, node)

  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the
    instances it was primary for.

    """
    cluster_info = self.cfg.GetClusterInfo()
    for node, n_img in node_image.items():
      # This code checks that every node which is now listed as
      # secondary has enough memory to host all instances it is
      # supposed to should a single other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well
      # as up ones, considering that even if they're down someone
      # might want to start them even in the event of a node failure.
      if n_img.offline:
        # we're skipping offline nodes from the N+1 warning, since
        # most likely we don't have good memory information from them;
        # we already list instances living on such nodes, and that's
        # enough warning
        continue
      for prinode, instances in n_img.sbp.items():
        needed_mem = 0
        for instance in instances:
          bep = cluster_info.FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        test = n_img.mfree < needed_mem
        self._ErrorIf(test, self.ENODEN1, node,
                      "not enough memory to accommodate instance failovers"
                      " should node %s fail (%dMiB needed, %dMiB available)",
                      prinode, needed_mem, n_img.mfree)

  @classmethod
  def _VerifyFiles(cls, errorif, nodeinfo, master_node, all_nvinfo,
                   (files_all, files_all_opt, files_mc, files_vm)):
    """Verifies file checksums collected from all nodes.

    @param errorif: Callback for reporting errors
    @param nodeinfo: List of L{objects.Node} objects
    @param master_node: Name of master node
    @param all_nvinfo: RPC results

    """
    node_names = frozenset(node.name for node in nodeinfo)

    assert master_node in node_names
    assert (len(files_all | files_all_opt | files_mc | files_vm) ==
            sum(map(len, [files_all, files_all_opt, files_mc, files_vm]))), \
           "Found file listed in more than one file list"

    # Define functions determining which nodes to consider for a file
    file2nodefn = dict([(filename, fn)
      for (files, fn) in [(files_all, None),
                          (files_all_opt, None),
                          (files_mc, lambda node: (node.master_candidate or
                                                   node.name == master_node)),
                          (files_vm, lambda node: node.vm_capable)]
      for filename in files])

    fileinfo = dict((filename, {}) for filename in file2nodefn.keys())

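    # fileinfo is filled below as: filename -> checksum -> set of node names
    # reporting that checksum; a None entry in file2nodefn means the file is
    # expected on every node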
    for node in nodeinfo:
      nresult = all_nvinfo[node.name]

      if nresult.fail_msg or not nresult.payload:
        node_files = None
      else:
        node_files = nresult.payload.get(constants.NV_FILELIST, None)

      test = not (node_files and isinstance(node_files, dict))
      errorif(test, cls.ENODEFILECHECK, node.name,
              "Node did not return file checksum data")
      if test:
        continue

      for (filename, checksum) in node_files.items():
        # Check if the file should be considered for a node
        fn = file2nodefn[filename]
        if fn is None or fn(node):
          fileinfo[filename].setdefault(checksum, set()).add(node.name)

    for (filename, checksums) in fileinfo.items():
      assert compat.all(len(i) > 10 for i in checksums), "Invalid checksum"

      # Nodes having the file
      with_file = frozenset(node_name
                            for nodes in fileinfo[filename].values()
                            for node_name in nodes)

      # Nodes missing file
      missing_file = node_names - with_file

      if filename in files_all_opt:
        # All or no nodes
        errorif(missing_file and missing_file != node_names,
                cls.ECLUSTERFILECHECK, None,
                "File %s is optional, but it must exist on all or no nodes (not"
                " found on %s)",
                filename, utils.CommaJoin(utils.NiceSort(missing_file)))
      else:
        errorif(missing_file, cls.ECLUSTERFILECHECK, None,
                "File %s is missing from node(s) %s", filename,
                utils.CommaJoin(utils.NiceSort(missing_file)))

      # See if there are multiple versions of the file
      test = len(checksums) > 1
      if test:
        variants = ["variant %s on %s" %
                    (idx + 1, utils.CommaJoin(utils.NiceSort(nodes)))
                    for (idx, (checksum, nodes)) in
                      enumerate(sorted(checksums.items()))]
      else:
        variants = []

      errorif(test, cls.ECLUSTERFILECHECK, None,
              "File %s found with %s different checksums (%s)",
              filename, len(checksums), "; ".join(variants))

  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
                      drbd_map):
    """Verifies the node DRBD status.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param instanceinfo: the dict of instances
    @param drbd_helper: the configured DRBD usermode helper
    @param drbd_map: the DRBD map as returned by
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    if drbd_helper:
      helper_result = nresult.get(constants.NV_DRBDHELPER, None)
      test = (helper_result is None)
      _ErrorIf(test, self.ENODEDRBDHELPER, node,
               "no drbd usermode helper returned")
      if helper_result:
        status, payload = helper_result
        test = not status
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
                 "drbd usermode helper check unsuccessful: %s", payload)
        test = status and (payload != drbd_helper)
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
                 "wrong drbd usermode helper: %s", payload)

    # compute the DRBD minors
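    # node_drbd maps each minor in this node's DRBD map to a tuple of
    # (instance name, should_be_active); minors belonging to ghost instances
    # are recorded as not expected to be active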
    node_drbd = {}
    for minor, instance in drbd_map[node].items():
      test = instance not in instanceinfo
      _ErrorIf(test, self.ECLUSTERCFG, None,
               "ghost instance '%s' in temporary DRBD map", instance)
        # ghost instance should not be running, but otherwise we
        # don't give double warnings (both ghost instance and
        # unallocated minor in use)
      if test:
        node_drbd[minor] = (instance, False)
      else:
        instance = instanceinfo[instance]
        node_drbd[minor] = (instance.name, instance.admin_up)

    # and now check them
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
    test = not isinstance(used_minors, (tuple, list))
    _ErrorIf(test, self.ENODEDRBD, node,
             "cannot parse drbd status file: %s", str(used_minors))
    if test:
      # we cannot check drbd status
      return

    for minor, (iname, must_exist) in node_drbd.items():
      test = minor not in used_minors and must_exist
      _ErrorIf(test, self.ENODEDRBD, node,
               "drbd minor %d of instance %s is not active", minor, iname)
    for minor in used_minors:
      test = minor not in node_drbd
      _ErrorIf(test, self.ENODEDRBD, node,
               "unallocated drbd minor %d is in use", minor)

  def _UpdateNodeOS(self, ninfo, nresult, nimg):
    """Builds the node OS structures.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    remote_os = nresult.get(constants.NV_OSLIST, None)
    test = (not isinstance(remote_os, list) or
            not compat.all(isinstance(v, list) and len(v) == 7
                           for v in remote_os))

    _ErrorIf(test, self.ENODEOS, node,
             "node hasn't returned valid OS data")

    nimg.os_fail = test

    if test:
      return

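    # os_dict maps each OS name to a list of entries, one per location where
    # the OS was found: (path, status, diagnose, set(variants),
    # set(parameters), set(api_versions))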
    os_dict = {}

    for (name, os_path, status, diagnose,
         variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:

      if name not in os_dict:
        os_dict[name] = []

      # parameters is a list of lists instead of list of tuples due to
      # JSON lacking a real tuple type, fix it:
      parameters = [tuple(v) for v in parameters]
      os_dict[name].append((os_path, status, diagnose,
                            set(variants), set(parameters), set(api_ver)))

    nimg.oslist = os_dict

  def _VerifyNodeOS(self, ninfo, nimg, base):
    """Verifies the node OS list.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nimg: the node image object
    @param base: the 'template' node we match against (e.g. from the master)

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"

    beautify_params = lambda l: ["%s: %s" % (k, v) for (k, v) in l]
    for os_name, os_data in nimg.oslist.items():
      assert os_data, "Empty OS status for OS %s?!" % os_name
      f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
      _ErrorIf(not f_status, self.ENODEOS, node,
               "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
      _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
               "OS '%s' has multiple entries (first one shadows the rest): %s",
               os_name, utils.CommaJoin([v[0] for v in os_data]))
      # this will be caught in the backend too
      _ErrorIf(compat.any(v >= constants.OS_API_V15 for v in f_api)
               and not f_var, self.ENODEOS, node,
               "OS %s with API at least %d does not declare any variant",
               os_name, constants.OS_API_V15)
      # comparisons with the 'base' image
      test = os_name not in base.oslist
      _ErrorIf(test, self.ENODEOS, node,
               "Extra OS %s not present on reference node (%s)",
               os_name, base.name)
      if test:
        continue
      assert base.oslist[os_name], "Base node has empty OS status?"
      _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
      if not b_status:
        # base OS is invalid, skipping
        continue
      for kind, a, b in [("API version", f_api, b_api),
                         ("variants list", f_var, b_var),
                         ("parameters", beautify_params(f_param),
                          beautify_params(b_param))]:
        _ErrorIf(a != b, self.ENODEOS, node,
                 "OS %s for %s differs from reference node %s: [%s] vs. [%s]",
                 kind, os_name, base.name,
                 utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))

    # check any missing OSes
    missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
    _ErrorIf(missing, self.ENODEOS, node,
             "OSes present on reference node %s but missing on this node: %s",
             base.name, utils.CommaJoin(missing))

  def _VerifyOob(self, ninfo, nresult):
    """Verifies out of band functionality of a node.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node

    """
    node = ninfo.name
    # We just have to verify the paths on master and/or master candidates
    # as the oob helper is invoked on the master
    if ((ninfo.master_candidate or ninfo.master_capable) and
        constants.NV_OOB_PATHS in nresult):
      for path_result in nresult[constants.NV_OOB_PATHS]:
        self._ErrorIf(path_result, self.ENODEOOBPATH, node, path_result)

  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
    """Verifies and updates the node volume data.

    This function will update a L{NodeImage}'s internal structures
    with data from the remote call.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object
    @param vg_name: the configured VG name

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    nimg.lvm_fail = True
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
    if vg_name is None:
      pass
    elif isinstance(lvdata, basestring):
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
               utils.SafeEncode(lvdata))
    elif not isinstance(lvdata, dict):
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
    else:
      nimg.volumes = lvdata
      nimg.lvm_fail = False

  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
    """Verifies and updates the node instance list.

    If the listing was successful, then updates this node's instance
    list. Otherwise, it marks the RPC call as failed for the instance
    list key.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object

    """
    idata = nresult.get(constants.NV_INSTANCELIST, None)
    test = not isinstance(idata, list)
    self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
                  " (instancelist): %s", utils.SafeEncode(str(idata)))
    if test:
      nimg.hyp_fail = True
    else:
      nimg.instances = idata

  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
    """Verifies and computes a node information map

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object
    @param vg_name: the configured VG name

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # try to read free memory (from the hypervisor)
    hv_info = nresult.get(constants.NV_HVINFO, None)
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
    _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
    if not test:
      try:
        nimg.mfree = int(hv_info["memory_free"])
      except (ValueError, TypeError):
        _ErrorIf(True, self.ENODERPC, node,
                 "node returned invalid nodeinfo, check hypervisor")

    # FIXME: devise a free space model for file based instances as well
    if vg_name is not None:
      test = (constants.NV_VGLIST not in nresult or
              vg_name not in nresult[constants.NV_VGLIST])
      _ErrorIf(test, self.ENODELVM, node,
               "node didn't return data for the volume group '%s'"
               " - it is either missing or broken", vg_name)
      if not test:
        try:
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
        except (ValueError, TypeError):
          _ErrorIf(True, self.ENODERPC, node,
                   "node returned invalid LVM info, check LVM status")

  def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
    """Gets per-disk status information for all instances.

    @type nodelist: list of strings
    @param nodelist: Node names
    @type node_image: dict of (name, L{objects.Node})
    @param node_image: Node objects
    @type instanceinfo: dict of (name, L{objects.Instance})
    @param instanceinfo: Instance objects
    @rtype: {instance: {node: [(success, payload)]}}
    @return: a dictionary of per-instance dictionaries with nodes as
        keys and disk information as values; the disk information is a
        list of tuples (success, payload)

    """
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    node_disks = {}
    node_disks_devonly = {}
    diskless_instances = set()
    diskless = constants.DT_DISKLESS

    for nname in nodelist:
      node_instances = list(itertools.chain(node_image[nname].pinst,
                                            node_image[nname].sinst))
      diskless_instances.update(inst for inst in node_instances
                                if instanceinfo[inst].disk_template == diskless)
      disks = [(inst, disk)
               for inst in node_instances
               for disk in instanceinfo[inst].disks]

      if not disks:
        # No need to collect data
        continue

      node_disks[nname] = disks

      # Creating copies as SetDiskID below will modify the objects and that can
      # lead to incorrect data returned from nodes
      devonly = [dev.Copy() for (_, dev) in disks]

      for dev in devonly:
        self.cfg.SetDiskID(dev, nname)

      node_disks_devonly[nname] = devonly

    assert len(node_disks) == len(node_disks_devonly)

    # Collect data from all nodes with disks
    result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
                                                          node_disks_devonly)

    assert len(result) == len(node_disks)

    instdisk = {}

    for (nname, nres) in result.items():
      disks = node_disks[nname]

      if nres.offline:
        # No data from this node
        data = len(disks) * [(False, "node offline")]
      else:
        msg = nres.fail_msg
        _ErrorIf(msg, self.ENODERPC, nname,
                 "while getting disk information: %s", msg)
        if msg:
          # No data from this node
          data = len(disks) * [(False, msg)]
        else:
          data = []
          for idx, i in enumerate(nres.payload):
            if isinstance(i, (tuple, list)) and len(i) == 2:
              data.append(i)
            else:
              logging.warning("Invalid result from node %s, entry %d: %s",
                              nname, idx, i)
              data.append((False, "Invalid result from the remote node"))

      for ((inst, _), status) in zip(disks, data):
        instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)

    # Add empty entries for diskless instances.
    for inst in diskless_instances:
      assert inst not in instdisk
      instdisk[inst] = {}

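    # sanity check: every instance entry must carry one (success, payload)
    # pair per disk, and only for nodes the instance actually uses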
    assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
                      len(nnames) <= len(instanceinfo[inst].all_nodes) and
                      compat.all(isinstance(s, (tuple, list)) and
                                 len(s) == 2 for s in statuses)
                      for inst, nnames in instdisk.items()
                      for nname, statuses in nnames.items())
    assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"

    return instdisk

  def _VerifyHVP(self, hvp_data):
    """Verifies locally the syntax of the hypervisor parameters.

    """
    for item, hv_name, hv_params in hvp_data:
      msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
             (hv_name, item))
      try:
        hv_class = hypervisor.GetHypervisor(hv_name)
        utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
        hv_class.CheckParameterSyntax(hv_params)
      except errors.GenericError, err:
        self._ErrorIf(True, self.ECLUSTERCFG, None, msg % str(err))

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks are run only in the post phase; their failure is
    logged in the verify output and makes the verification fail.

    """
    cfg = self.cfg

    env = {
      "CLUSTER_TAGS": " ".join(cfg.GetClusterInfo().GetTags())
      }

    env.update(("NODE_TAGS_%s" % node.name, " ".join(node.GetTags()))
               for node in cfg.GetAllNodesInfo().values())

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([], self.cfg.GetNodeList())

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    # This method has too many local variables. pylint: disable-msg=R0914
    self.bad = False
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
    verbose = self.op.verbose
    self._feedback_fn = feedback_fn
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)

    # Check the cluster certificates
    for cert_filename in constants.ALL_CERT_FILES:
      (errcode, msg) = _VerifyCertificate(cert_filename)
      _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)

    vg_name = self.cfg.GetVGName()
    drbd_helper = self.cfg.GetDRBDHelper()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    cluster = self.cfg.GetClusterInfo()
    nodeinfo_byname = self.cfg.GetAllNodesInfo()
    nodelist = utils.NiceSort(nodeinfo_byname.keys())
    nodeinfo = [nodeinfo_byname[nname] for nname in nodelist]
    instanceinfo = self.cfg.GetAllInstancesInfo()
    instancelist = utils.NiceSort(instanceinfo.keys())
    groupinfo = self.cfg.GetAllNodeGroupsInfo()
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    n_offline = 0 # Count of offline nodes
    n_drained = 0 # Count of nodes being drained
    node_vol_should = {}

    # FIXME: verify OS list

    # File verification
    filemap = _ComputeAncillaryFiles(cluster, False)

    # do local checksums
    master_node = self.master_node = self.cfg.GetMasterNode()
    master_ip = self.cfg.GetMasterIP()

    # Compute the set of hypervisor parameters
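    # hvp_data entries are (source description, hypervisor name, parameters)
    # triples; they are checked locally by _VerifyHVP below and also shipped
    # to the nodes via the NV_HVPARAMS verification request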
    hvp_data = []
    for hv_name in hypervisors:
      hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
    for os_name, os_hvp in cluster.os_hvp.items():
      for hv_name, hv_params in os_hvp.items():
        if not hv_params:
          continue
        full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
        hvp_data.append(("os %s" % os_name, hv_name, full_params))
    # TODO: collapse identical parameter values in a single one
    for instance in instanceinfo.values():
      if not instance.hvparams:
        continue
      hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
                       cluster.FillHV(instance)))
    # and verify them locally
    self._VerifyHVP(hvp_data)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    node_verify_param = {
      constants.NV_FILELIST:
        utils.UniqueSequence(filename
                             for files in filemap
                             for filename in files),
      constants.NV_NODELIST: [node.name for node in nodeinfo
                              if not node.offline],
      constants.NV_HYPERVISOR: hypervisors,
      constants.NV_HVPARAMS: hvp_data,
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
                                  node.secondary_ip) for node in nodeinfo
                                 if not node.offline],
      constants.NV_INSTANCELIST: hypervisors,
      constants.NV_VERSION: None,
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
      constants.NV_NODESETUP: None,
      constants.NV_TIME: None,
      constants.NV_MASTERIP: (master_node, master_ip),
      constants.NV_OSLIST: None,
      constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
      }

    if vg_name is not None:
      node_verify_param[constants.NV_VGLIST] = None
      node_verify_param[constants.NV_LVLIST] = vg_name
      node_verify_param[constants.NV_PVLIST] = [vg_name]
      node_verify_param[constants.NV_DRBDLIST] = None

    if drbd_helper:
      node_verify_param[constants.NV_DRBDHELPER] = drbd_helper

    # bridge checks
    # FIXME: this needs to be changed per node-group, not cluster-wide
    bridges = set()
    default_nicpp = cluster.nicparams[constants.PP_DEFAULT]
    if default_nicpp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
      bridges.add(default_nicpp[constants.NIC_LINK])
    for instance in instanceinfo.values():
      for nic in instance.nics:
        full_nic = cluster.SimpleFillNIC(nic.nicparams)
        if full_nic[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          bridges.add(full_nic[constants.NIC_LINK])

    if bridges:
      node_verify_param[constants.NV_BRIDGES] = list(bridges)

    # Build our expected cluster state
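    # node_image maps node names to NodeImage objects; "ghost" entries are
    # added below for nodes referenced by instances but absent from the
    # configuration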
    node_image = dict((node.name, self.NodeImage(offline=node.offline,
                                                 name=node.name,
                                                 vm_capable=node.vm_capable))
                      for node in nodeinfo)

    # Gather OOB paths
    oob_paths = []
    for node in nodeinfo:
      path = _SupportsOob(self.cfg, node)
      if path and path not in oob_paths:
        oob_paths.append(path)

    if oob_paths:
      node_verify_param[constants.NV_OOB_PATHS] = oob_paths

    for instance in instancelist:
      inst_config = instanceinfo[instance]

      for nname in inst_config.all_nodes:
        if nname not in node_image:
          # ghost node
          gnode = self.NodeImage(name=nname)
          gnode.ghost = True
          node_image[nname] = gnode

      inst_config.MapLVsByNode(node_vol_should)

      pnode = inst_config.primary_node
      node_image[pnode].pinst.append(instance)

      for snode in inst_config.secondary_nodes:
        nimg = node_image[snode]
        nimg.sinst.append(instance)
        if pnode not in nimg.sbp:
          nimg.sbp[pnode] = []
        nimg.sbp[pnode].append(instance)

    # At this point, we have the in-memory data structures complete,
    # except for the runtime information, which we'll gather next

    # Due to the way our RPC system works, exact response times cannot be
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
    # time before and after executing the request, we can at least have a time
    # window.
    nvinfo_starttime = time.time()
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())
    nvinfo_endtime = time.time()

    all_drbd_map = self.cfg.ComputeDRBDMap()

    feedback_fn("* Gathering disk information (%s nodes)" % len(nodelist))
    instdisk = self._CollectDiskInfo(nodelist, node_image, instanceinfo)

    feedback_fn("* Verifying configuration file consistency")
    self._VerifyFiles(_ErrorIf, nodeinfo, master_node, all_nvinfo, filemap)

    feedback_fn("* Verifying node status")

    refos_img = None

    for node_i in nodeinfo:
      node = node_i.name
      nimg = node_image[node]

      if node_i.offline:
        if verbose:
          feedback_fn("* Skipping offline node %s" % (node,))
        n_offline += 1
        continue

      if node == master_node:
        ntype = "master"
      elif node_i.master_candidate:
        ntype = "master candidate"
      elif node_i.drained:
        ntype = "drained"
        n_drained += 1
      else:
        ntype = "regular"
      if verbose:
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))

      msg = all_nvinfo[node].fail_msg
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
      if msg:
        nimg.rpc_fail = True
        continue

      nresult = all_nvinfo[node].payload

      nimg.call_ok = self._VerifyNode(node_i, nresult)
      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
      self._VerifyNodeNetwork(node_i, nresult)
      self._VerifyOob(node_i, nresult)

      if nimg.vm_capable:
        self._VerifyNodeLVM(node_i, nresult, vg_name)
        self._VerifyNodeDrbd(node_i, nresult, instanceinfo, drbd_helper,
                             all_drbd_map)

        self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
        self._UpdateNodeInstances(node_i, nresult, nimg)
        self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
        self._UpdateNodeOS(node_i, nresult, nimg)
        if not nimg.os_fail:
          if refos_img is None:
            refos_img = nimg
          self._VerifyNodeOS(node_i, nimg, refos_img)
        self._VerifyNodeBridges(node_i, nresult, bridges)

    feedback_fn("* Verifying instance status")
2465
    for instance in instancelist:
2466
      if verbose:
2467
        feedback_fn("* Verifying instance %s" % instance)
2468
      inst_config = instanceinfo[instance]
2469
      self._VerifyInstance(instance, inst_config, node_image,
2470
                           instdisk[instance])
2471
      inst_nodes_offline = []
2472

    
2473
      pnode = inst_config.primary_node
2474
      pnode_img = node_image[pnode]
2475
      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
2476
               self.ENODERPC, pnode, "instance %s, connection to"
2477
               " primary node failed", instance)
2478

    
2479
      _ErrorIf(inst_config.admin_up and pnode_img.offline,
2480
               self.EINSTANCEBADNODE, instance,
2481
               "instance is marked as running and lives on offline node %s",
2482
               inst_config.primary_node)
2483

    
2484
      # If the instance is non-redundant we cannot survive losing its primary
2485
      # node, so we are not N+1 compliant. On the other hand we have no disk
2486
      # templates with more than one secondary so that situation is not well
2487
      # supported either.
2488
      # FIXME: does not support file-backed instances
2489
      if not inst_config.secondary_nodes:
2490
        i_non_redundant.append(instance)
2491

    
2492
      _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
2493
               instance, "instance has multiple secondary nodes: %s",
2494
               utils.CommaJoin(inst_config.secondary_nodes),
2495
               code=self.ETYPE_WARNING)
2496

    
2497
      if inst_config.disk_template in constants.DTS_INT_MIRROR:
2498
        pnode = inst_config.primary_node
2499
        instance_nodes = utils.NiceSort(inst_config.all_nodes)
2500
        instance_groups = {}
2501

    
2502
        for node in instance_nodes:
2503
          instance_groups.setdefault(nodeinfo_byname[node].group,
2504
                                     []).append(node)
2505

    
2506
        pretty_list = [
2507
          "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
2508
          # Sort so that we always list the primary node first.
2509
          for group, nodes in sorted(instance_groups.items(),
2510
                                     key=lambda (_, nodes): pnode in nodes,
2511
                                     reverse=True)]
2512

    
2513
        self._ErrorIf(len(instance_groups) > 1, self.EINSTANCESPLITGROUPS,
2514
                      instance, "instance has primary and secondary nodes in"
2515
                      " different groups: %s", utils.CommaJoin(pretty_list),
2516
                      code=self.ETYPE_WARNING)
2517

    
2518
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
2519
        i_non_a_balanced.append(instance)
2520

    
2521
      for snode in inst_config.secondary_nodes:
2522
        s_img = node_image[snode]
2523
        _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
2524
                 "instance %s, connection to secondary node failed", instance)
2525

    
2526
        if s_img.offline:
2527
          inst_nodes_offline.append(snode)
2528

    
2529
      # warn that the instance lives on offline nodes
2530
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
2531
               "instance has offline secondary node(s) %s",
2532
               utils.CommaJoin(inst_nodes_offline))
2533
      # ... or ghost/non-vm_capable nodes
2534
      for node in inst_config.all_nodes:
2535
        _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
2536
                 "instance lives on ghost node %s", node)
2537
        _ErrorIf(not node_image[node].vm_capable, self.EINSTANCEBADNODE,
2538
                 instance, "instance lives on non-vm_capable node %s", node)
2539

    
2540
    feedback_fn("* Verifying orphan volumes")
2541
    reserved = utils.FieldSet(*cluster.reserved_lvs)
2542
    self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
2543

    
2544
    feedback_fn("* Verifying orphan instances")
2545
    self._VerifyOrphanInstances(instancelist, node_image)
2546

    
2547
    if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
2548
      feedback_fn("* Verifying N+1 Memory redundancy")
2549
      self._VerifyNPlusOneMemory(node_image, instanceinfo)
2550

    
2551
    feedback_fn("* Other Notes")
2552
    if i_non_redundant:
2553
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
2554
                  % len(i_non_redundant))
2555

    
2556
    if i_non_a_balanced:
2557
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
2558
                  % len(i_non_a_balanced))
2559

    
2560
    if n_offline:
2561
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)
2562

    
2563
    if n_drained:
2564
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)
2565

    
2566
    return not self.bad
2567

    
2568
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      feedback_fn("* Hooks Results")
      assert hooks_results, "invalid result from hooks"

      for node_name in hooks_results:
        res = hooks_results[node_name]
        msg = res.fail_msg
        test = msg and not res.offline
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
                      "Communication failure in hooks execution: %s", msg)
        if res.offline or msg:
          # No need to investigate payload if node is offline or gave an error.
          # override manually lu_result here as _ErrorIf only
          # overrides self.bad
          lu_result = 1
          continue
        for script, hkr, output in res.payload:
          test = hkr == constants.HKR_FAIL
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
                        "Script %s failed, output:", script)
          if test:
            output = self._HOOKS_INDENT_RE.sub('      ', output)
            feedback_fn("%s" % output)
            lu_result = 0

      return lu_result


class LUClusterVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes)

    """
    result = res_nodes, res_instances, res_missing = {}, [], {}

    nodes = utils.NiceSort(self.cfg.GetVmCapableNodeList())
    instances = self.cfg.GetAllInstancesInfo().values()

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if not inst.admin_up:
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_lv_list(nodes, [])
    for node, node_res in node_lvs.items():
      if node_res.offline:
        continue
      msg = node_res.fail_msg
      if msg:
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
        res_nodes[node] = msg
        continue

      lvs = node_res.payload
      for lv_name, (_, _, lv_online) in lvs.items():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result


class LUClusterRepairDiskSizes(NoHooksLU):
  """Verifies the cluster disks sizes.

  """
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.instances:
      self.wanted_names = _GetWantedInstances(self, self.op.instances)
      self.needed_locks = {
        locking.LEVEL_NODE: [],
        locking.LEVEL_INSTANCE: self.wanted_names,
        }
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    else:
      self.wanted_names = None
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: locking.ALL_SET,
        }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
      self._LockInstancesNodes(primary_only=True)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]

  def _EnsureChildSizes(self, disk):
    """Ensure children of the disk have the needed disk size.

    This is valid mainly for DRBD8 and fixes an issue where the
    children have smaller disk size.

    @param disk: an L{ganeti.objects.Disk} object

    """
    if disk.dev_type == constants.LD_DRBD8:
      assert disk.children, "Empty children for DRBD8?"
      fchild = disk.children[0]
      mismatch = fchild.size < disk.size
      if mismatch:
        self.LogInfo("Child disk has size %d, parent %d, fixing",
                     fchild.size, disk.size)
        fchild.size = disk.size

      # and we recurse on this child only, not on the metadev
      return self._EnsureChildSizes(fchild) or mismatch
    else:
      return False

  def Exec(self, feedback_fn):
    """Verify the size of cluster disks.

    """
    # TODO: check child disks too
    # TODO: check differences in size between primary/secondary nodes
    per_node_disks = {}
    for instance in self.wanted_instances:
      pnode = instance.primary_node
      if pnode not in per_node_disks:
        per_node_disks[pnode] = []
      for idx, disk in enumerate(instance.disks):
        per_node_disks[pnode].append((instance, idx, disk))

    changed = []
    for node, dskl in per_node_disks.items():
      newl = [v[2].Copy() for v in dskl]
      for dsk in newl:
        self.cfg.SetDiskID(dsk, node)
      result = self.rpc.call_blockdev_getsize(node, newl)
      if result.fail_msg:
        self.LogWarning("Failure in blockdev_getsize call to node"
                        " %s, ignoring", node)
        continue
      if len(result.payload) != len(dskl):
        logging.warning("Invalid result from node %s: len(dskl)=%d,"
                        " result.payload=%s", node, len(dskl), result.payload)
        self.LogWarning("Invalid result from node %s, ignoring node results",
                        node)
        continue
      for ((instance, idx, disk), size) in zip(dskl, result.payload):
        if size is None:
          self.LogWarning("Disk %d of instance %s did not return size"
                          " information, ignoring", idx, instance.name)
          continue
        if not isinstance(size, (int, long)):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " size information, ignoring", idx, instance.name)
          continue
2783
        if size != disk.size:
2784
          self.LogInfo("Disk %d of instance %s has mismatched size,"
2785
                       " correcting: recorded %d, actual %d", idx,
2786
                       instance.name, disk.size, size)
2787
          disk.size = size
2788
          self.cfg.Update(instance, feedback_fn)
2789
          changed.append((instance.name, idx, size))
2790
        if self._EnsureChildSizes(disk):
2791
          self.cfg.Update(instance, feedback_fn)
2792
          changed.append((instance.name, idx, disk.size))
2793
    return changed
2794

    
2795

    
2796
class LUClusterRename(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([self.cfg.GetMasterNode()], self.cfg.GetNodeList())

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = netutils.GetHostname(name=self.op.name,
                                    family=self.cfg.GetPrimaryIPFamily())

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed",
                                 errors.ECODE_INVAL)
    if new_ip != old_ip:
      if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network" %
                                   new_ip, errors.ECODE_NOTUNIQUE)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster, feedback_fn)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetOnlineNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
    finally:
      result = self.rpc.call_node_start_master(master, False, False)
      msg = result.fail_msg
      if msg:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually: %s", msg)

    return clustername


class LUClusterSetParams(LogicalUnit):
2878
  """Change the parameters of the cluster.
2879

2880
  """
2881
  HPATH = "cluster-modify"
2882
  HTYPE = constants.HTYPE_CLUSTER
2883
  REQ_BGL = False
2884

    
2885
  def CheckArguments(self):
2886
    """Check parameters
2887

2888
    """
2889
    if self.op.uid_pool:
2890
      uidpool.CheckUidPool(self.op.uid_pool)
2891

    
2892
    if self.op.add_uids:
2893
      uidpool.CheckUidPool(self.op.add_uids)
2894

    
2895
    if self.op.remove_uids:
2896
      uidpool.CheckUidPool(self.op.remove_uids)
2897

    
2898
  def ExpandNames(self):
2899
    # FIXME: in the future maybe other cluster params won't require checking on
2900
    # all nodes to be modified.
2901
    self.needed_locks = {
2902
      locking.LEVEL_NODE: locking.ALL_SET,
2903
    }
2904
    self.share_locks[locking.LEVEL_NODE] = 1
2905

    
2906
  def BuildHooksEnv(self):
2907
    """Build hooks env.
2908

2909
    """
2910
    return {
2911
      "OP_TARGET": self.cfg.GetClusterName(),
2912
      "NEW_VG_NAME": self.op.vg_name,
2913
      }
2914

    
2915
  def BuildHooksNodes(self):
2916
    """Build hooks nodes.
2917

2918
    """
2919
    mn = self.cfg.GetMasterNode()
2920
    return ([mn], [mn])
2921

    
2922
  def CheckPrereq(self):
2923
    """Check prerequisites.
2924

2925
    This checks whether the given params don't conflict and
2926
    if the given volume group is valid.
2927

2928
    """
2929
    if self.op.vg_name is not None and not self.op.vg_name:
2930
      if self.cfg.HasAnyDiskOfType(constants.LD_LV):
2931
        raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
2932
                                   " instances exist", errors.ECODE_INVAL)
2933

    
2934
    if self.op.drbd_helper is not None and not self.op.drbd_helper:
2935
      if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
2936
        raise errors.OpPrereqError("Cannot disable drbd helper while"
2937
                                   " drbd-based instances exist",
2938
                                   errors.ECODE_INVAL)
2939

    
2940
    node_list = self.glm.list_owned(locking.LEVEL_NODE)
2941

    
2942
    # if vg_name not None, checks given volume group on all nodes
2943
    if self.op.vg_name:
2944
      vglist = self.rpc.call_vg_list(node_list)
2945
      for node in node_list:
2946
        msg = vglist[node].fail_msg
2947
        if msg:
2948
          # ignoring down node
2949
          self.LogWarning("Error while gathering data on node %s"
2950
                          " (ignoring node): %s", node, msg)
2951
          continue
2952
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
2953
                                              self.op.vg_name,
2954
                                              constants.MIN_VG_SIZE)
2955
        if vgstatus:
2956
          raise errors.OpPrereqError("Error on node '%s': %s" %
2957
                                     (node, vgstatus), errors.ECODE_ENVIRON)
2958

    
2959
    if self.op.drbd_helper:
2960
      # checks given drbd helper on all nodes
2961
      helpers = self.rpc.call_drbd_helper(node_list)
2962
      for node in node_list:
2963
        ninfo = self.cfg.GetNodeInfo(node)
2964
        if ninfo.offline:
2965
          self.LogInfo("Not checking drbd helper on offline node %s", node)
2966
          continue
2967
        msg = helpers[node].fail_msg
2968
        if msg:
2969
          raise errors.OpPrereqError("Error checking drbd helper on node"
2970
                                     " '%s': %s" % (node, msg),
2971
                                     errors.ECODE_ENVIRON)
2972
        node_helper = helpers[node].payload
2973
        if node_helper != self.op.drbd_helper:
2974
          raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
2975
                                     (node, node_helper), errors.ECODE_ENVIRON)
2976

    
2977
    self.cluster = cluster = self.cfg.GetClusterInfo()
2978
    # validate params changes
2979
    if self.op.beparams:
2980
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
2981
      self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
2982

    
2983
    if self.op.ndparams:
2984
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
2985
      self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
2986

    
2987
      # TODO: we need a more general way to handle resetting
2988
      # cluster-level parameters to default values
2989
      if self.new_ndparams["oob_program"] == "":
2990
        self.new_ndparams["oob_program"] = \
2991
            constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
2992

    
2993
    if self.op.nicparams:
2994
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
2995
      self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
2996
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
2997
      nic_errors = []
2998

    
2999
      # check all instances for consistency
3000
      for instance in self.cfg.GetAllInstancesInfo().values():
3001
        for nic_idx, nic in enumerate(instance.nics):
3002
          params_copy = copy.deepcopy(nic.nicparams)
3003
          params_filled = objects.FillDict(self.new_nicparams, params_copy)
3004

    
3005
          # check parameter syntax
3006
          try:
3007
            objects.NIC.CheckParameterSyntax(params_filled)
3008
          except errors.ConfigurationError, err:
3009
            nic_errors.append("Instance %s, nic/%d: %s" %
3010
                              (instance.name, nic_idx, err))
3011

    
3012
          # if we're moving instances to routed, check that they have an ip
3013
          target_mode = params_filled[constants.NIC_MODE]
3014
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
3015
            nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
3016
                              " address" % (instance.name, nic_idx))
3017
      if nic_errors:
3018
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
3019
                                   "\n".join(nic_errors))
3020

    
3021
    # hypervisor list/parameters
3022
    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
3023
    if self.op.hvparams:
3024
      for hv_name, hv_dict in self.op.hvparams.items():
3025
        if hv_name not in self.new_hvparams:
3026
          self.new_hvparams[hv_name] = hv_dict
3027
        else:
3028
          self.new_hvparams[hv_name].update(hv_dict)
3029

    
3030
    # os hypervisor parameters
3031
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
3032
    if self.op.os_hvp:
3033
      for os_name, hvs in self.op.os_hvp.items():
3034
        if os_name not in self.new_os_hvp:
3035
          self.new_os_hvp[os_name] = hvs
3036
        else:
3037
          for hv_name, hv_dict in hvs.items():
3038
            if hv_name not in self.new_os_hvp[os_name]:
3039
              self.new_os_hvp[os_name][hv_name] = hv_dict
3040
            else:
3041
              self.new_os_hvp[os_name][hv_name].update(hv_dict)
3042

    
3043
    # os parameters
3044
    self.new_osp = objects.FillDict(cluster.osparams, {})
3045
    if self.op.osparams:
3046
      for os_name, osp in self.op.osparams.items():
3047
        if os_name not in self.new_osp:
3048
          self.new_osp[os_name] = {}
3049

    
3050
        self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
3051
                                                  use_none=True)
3052

    
3053
        if not self.new_osp[os_name]:
3054
          # we removed all parameters
3055
          del self.new_osp[os_name]
3056
        else:
3057
          # check the parameter validity (remote check)
3058
          _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
3059
                         os_name, self.new_osp[os_name])
3060

    
3061
    # changes to the hypervisor list
3062
    if self.op.enabled_hypervisors is not None:
3063
      self.hv_list = self.op.enabled_hypervisors
3064
      for hv in self.hv_list:
3065
        # if the hypervisor doesn't already exist in the cluster
3066
        # hvparams, we initialize it to empty, and then (in both
3067
        # cases) we make sure to fill the defaults, as we might not
3068
        # have a complete defaults list if the hypervisor wasn't
3069
        # enabled before
3070
        if hv not in new_hvp:
3071
          new_hvp[hv] = {}
3072
        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
3073
        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
3074
    else:
3075
      self.hv_list = cluster.enabled_hypervisors
3076

    
3077
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
3078
      # either the enabled list has changed, or the parameters have, validate
3079
      for hv_name, hv_params in self.new_hvparams.items():
3080
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
3081
            (self.op.enabled_hypervisors and
3082
             hv_name in self.op.enabled_hypervisors)):
3083
          # either this is a new hypervisor, or its parameters have changed
3084
          hv_class = hypervisor.GetHypervisor(hv_name)
3085
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
3086
          hv_class.CheckParameterSyntax(hv_params)
3087
          _CheckHVParams(self, node_list, hv_name, hv_params)
3088

    
3089
    if self.op.os_hvp:
3090
      # no need to check any newly-enabled hypervisors, since the
3091
      # defaults have already been checked in the above code-block
3092
      for os_name, os_hvp in self.new_os_hvp.items():
3093
        for hv_name, hv_params in os_hvp.items():
3094
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
3095
          # we need to fill in the new os_hvp on top of the actual hv_p
3096
          cluster_defaults = self.new_hvparams.get(hv_name, {})
3097
          new_osp = objects.FillDict(cluster_defaults, hv_params)
3098
          hv_class = hypervisor.GetHypervisor(hv_name)
3099
          hv_class.CheckParameterSyntax(new_osp)
3100
          _CheckHVParams(self, node_list, hv_name, new_osp)
3101

    
3102
    if self.op.default_iallocator:
3103
      alloc_script = utils.FindFile(self.op.default_iallocator,
3104
                                    constants.IALLOCATOR_SEARCH_PATH,
3105
                                    os.path.isfile)
3106
      if alloc_script is None:
3107
        raise errors.OpPrereqError("Invalid default iallocator script '%s'"
3108
                                   " specified" % self.op.default_iallocator,
3109
                                   errors.ECODE_INVAL)
3110

    
3111
  def Exec(self, feedback_fn):
3112
    """Change the parameters of the cluster.
3113

3114
    """
3115
    if self.op.vg_name is not None:
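      # an empty vg_name is a request to disable LVM storage; it is
      # normalized to None before being compared with and stored in the config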
3116
      new_volume = self.op.vg_name
3117
      if not new_volume:
3118
        new_volume = None
3119
      if new_volume != self.cfg.GetVGName():
3120
        self.cfg.SetVGName(new_volume)
3121
      else:
3122
        feedback_fn("Cluster LVM configuration already in desired"
3123
                    " state, not changing")
3124
    if self.op.drbd_helper is not None:
3125
      new_helper = self.op.drbd_helper
3126
      if not new_helper:
3127
        new_helper = None
3128
      if new_helper != self.cfg.GetDRBDHelper():
3129
        self.cfg.SetDRBDHelper(new_helper)
3130
      else:
3131
        feedback_fn("Cluster DRBD helper already in desired state,"
3132
                    " not changing")
3133
    if self.op.hvparams:
3134
      self.cluster.hvparams = self.new_hvparams
3135
    if self.op.os_hvp:
3136
      self.cluster.os_hvp = self.new_os_hvp
3137
    if self.op.enabled_hypervisors is not None:
3138
      self.cluster.hvparams = self.new_hvparams
3139
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
3140
    if self.op.beparams:
3141
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
3142
    if self.op.nicparams:
3143
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
3144
    if self.op.osparams:
3145
      self.cluster.osparams = self.new_osp
3146
    if self.op.ndparams:
3147
      self.cluster.ndparams = self.new_ndparams
3148

    
3149
    if self.op.candidate_pool_size is not None:
3150
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
3151
      # we need to update the pool size here, otherwise the save will fail
3152
      _AdjustCandidatePool(self, [])
3153

    
3154
    if self.op.maintain_node_health is not None:
3155
      self.cluster.maintain_node_health = self.op.maintain_node_health
3156

    
3157
    if self.op.prealloc_wipe_disks is not None:
3158
      self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
3159

    
3160
    if self.op.add_uids is not None:
3161
      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
3162

    
3163
    if self.op.remove_uids is not None:
3164
      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
3165

    
3166
    if self.op.uid_pool is not None:
3167
      self.cluster.uid_pool = self.op.uid_pool
3168

    
3169
    if self.op.default_iallocator is not None:
3170
      self.cluster.default_iallocator = self.op.default_iallocator
3171

    
3172
    if self.op.reserved_lvs is not None:
3173
      self.cluster.reserved_lvs = self.op.reserved_lvs
3174

    
3175
    def helper_os(aname, mods, desc):
3176
      desc += " OS list"
3177
      lst = getattr(self.cluster, aname)
3178
      for key, val in mods:
3179
        if key == constants.DDM_ADD:
3180
          if val in lst:
3181
            feedback_fn("OS %s already in %s, ignoring" % (val, desc))
3182
          else:
3183
            lst.append(val)
3184
        elif key == constants.DDM_REMOVE:
3185
          if val in lst:
3186
            lst.remove(val)
3187
          else:
3188
            feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
3189
        else:
3190
          raise errors.ProgrammerError("Invalid modification '%s'" % key)
3191

    
3192
    if self.op.hidden_os:
3193
      helper_os("hidden_os", self.op.hidden_os, "hidden")
3194

    
3195
    if self.op.blacklisted_os:
3196
      helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
3197

    
3198
    if self.op.master_netdev:
3199
      master = self.cfg.GetMasterNode()
3200
      feedback_fn("Shutting down master ip on the current netdev (%s)" %
3201
                  self.cluster.master_netdev)
3202
      result = self.rpc.call_node_stop_master(master, False)
3203
      result.Raise("Could not disable the master ip")
3204
      feedback_fn("Changing master_netdev from %s to %s" %
3205
                  (self.cluster.master_netdev, self.op.master_netdev))
3206
      self.cluster.master_netdev = self.op.master_netdev
3207

    
3208
    self.cfg.Update(self.cluster, feedback_fn)
3209

    
3210
    if self.op.master_netdev:
3211
      feedback_fn("Starting the master ip on the new master netdev (%s)" %
3212
                  self.op.master_netdev)
3213
      result = self.rpc.call_node_start_master(master, False, False)
3214
      if result.fail_msg:
3215
        self.LogWarning("Could not re-enable the master ip on"
3216
                        " the master, please restart manually: %s",
3217
                        result.fail_msg)
3218

    
3219

    
3220
def _UploadHelper(lu, nodes, fname):
  """Helper for uploading a file and showing warnings.

  """
  if os.path.exists(fname):
    result = lu.rpc.call_upload_file(nodes, fname)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (fname, to_node, msg))
        lu.proc.LogWarning(msg)


def _ComputeAncillaryFiles(cluster, redist):
  """Compute files external to Ganeti which need to be consistent.

  @type redist: boolean
  @param redist: Whether to include files which need to be redistributed

  """
  # Compute files for all nodes
  files_all = set([
    constants.SSH_KNOWN_HOSTS_FILE,
    constants.CONFD_HMAC_KEY,
    constants.CLUSTER_DOMAIN_SECRET_FILE,
    ])

  if not redist:
    files_all.update(constants.ALL_CERT_FILES)
    files_all.update(ssconf.SimpleStore().GetFileList())

  if cluster.modify_etc_hosts:
    files_all.add(constants.ETC_HOSTS)

  # Files which must either exist on all nodes or on none
  files_all_opt = set([
    constants.RAPI_USERS_FILE,
    ])

  # Files which should only be on master candidates
  files_mc = set()
  if not redist:
    files_mc.add(constants.CLUSTER_CONF_FILE)

  # Files which should only be on VM-capable nodes
  files_vm = set(filename
    for hv_name in cluster.enabled_hypervisors
    for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles())

  # Filenames must be unique
  assert (len(files_all | files_all_opt | files_mc | files_vm) ==
          sum(map(len, [files_all, files_all_opt, files_mc, files_vm]))), \
         "Found file listed in more than one file list"

  return (files_all, files_all_opt, files_mc, files_vm)


def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  @param lu: calling logical unit
  @param additional_nodes: list of nodes not in the config to distribute to
  @type additional_vm: boolean
  @param additional_vm: whether the additional nodes are vm-capable or not

  """
  # Gather target nodes
  cluster = lu.cfg.GetClusterInfo()
  master_info = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())

  online_nodes = lu.cfg.GetOnlineNodeList()
  vm_nodes = lu.cfg.GetVmCapableNodeList()

  if additional_nodes is not None:
    online_nodes.extend(additional_nodes)
    if additional_vm:
      vm_nodes.extend(additional_nodes)

  # Never distribute to master node
  for nodelist in [online_nodes, vm_nodes]:
    if master_info.name in nodelist:
      nodelist.remove(master_info.name)

  # Gather file lists
  (files_all, files_all_opt, files_mc, files_vm) = \
    _ComputeAncillaryFiles(cluster, True)

  # Never re-distribute configuration file from here
  assert not (constants.CLUSTER_CONF_FILE in files_all or
              constants.CLUSTER_CONF_FILE in files_vm)
  assert not files_mc, "Master candidates not handled in this function"

  filemap = [
    (online_nodes, files_all),
    (online_nodes, files_all_opt),
    (vm_nodes, files_vm),
    ]

  # Upload the files
  for (node_list, files) in filemap:
    for fname in files:
      _UploadHelper(lu, node_list, fname)


class LUClusterRedistConf(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
    _RedistributeAncillaryFiles(self)


def _WaitForSync(lu, instance, disks=None, oneshot=False):
3352
  """Sleep and poll for an instance's disk to sync.
3353

3354
  """
3355
  if not instance.disks or disks is not None and not disks:
3356
    return True
3357

    
3358
  disks = _ExpandCheckDisks(instance, disks)
3359

    
3360
  if not oneshot:
3361
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
3362

    
3363
  node = instance.primary_node
3364

    
3365
  for dev in disks:
3366
    lu.cfg.SetDiskID(dev, node)
3367

    
3368
  # TODO: Convert to utils.Retry
3369

    
3370
  retries = 0
3371
  degr_retries = 10 # in seconds, as we sleep 1 second each time
3372
  while True:
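    # each iteration polls the mirror status on the primary node; transient
    # RPC failures are tolerated up to 10 times before aborting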
3373
    max_time = 0
3374
    done = True
3375
    cumul_degraded = False
3376
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
3377
    msg = rstats.fail_msg
3378
    if msg:
3379
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
3380
      retries += 1
3381
      if retries >= 10:
3382
        raise errors.RemoteError("Can't contact node %s for mirror data,"
3383
                                 " aborting." % node)
3384
      time.sleep(6)
3385
      continue
3386
    rstats = rstats.payload
3387
    retries = 0
3388
    for i, mstat in enumerate(rstats):
3389
      if mstat is None:
3390
        lu.LogWarning("Can't compute data for node %s/%s",
3391
                           node, disks[i].iv_name)
3392
        continue
3393

    
3394
      cumul_degraded = (cumul_degraded or
3395
                        (mstat.is_degraded and mstat.sync_percent is None))
3396
      if mstat.sync_percent is not None:
3397
        done = False
3398
        if mstat.estimated_time is not None:
3399
          rem_time = ("%s remaining (estimated)" %
3400
                      utils.FormatSeconds(mstat.estimated_time))
3401
          max_time = mstat.estimated_time
3402
        else:
3403
          rem_time = "no time estimate"
3404
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
3405
                        (disks[i].iv_name, mstat.sync_percent, rem_time))
3406

    
3407
    # if we're done but degraded, let's do a few small retries, to
3408
    # make sure we see a stable and not transient situation; therefore
3409
    # we force restart of the loop
3410
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
3411
      logging.info("Degraded disks found, %d retries left", degr_retries)
3412
      degr_retries -= 1
3413
      time.sleep(1)
3414
      continue
3415

    
3416
    if done or oneshot:
3417
      break
3418

    
3419
    time.sleep(min(60, max_time))
3420

    
3421
  if done:
3422
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
3423
  return not cumul_degraded
3424

    
3425

    
3426
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
3427
  """Check that mirrors are not degraded.
3428

3429
  The ldisk parameter, if True, will change the test from the
3430
  is_degraded attribute (which represents overall non-ok status for
3431
  the device(s)) to the ldisk (representing the local storage status).
3432

3433
  """
3434
  lu.cfg.SetDiskID(dev, node)
3435

    
3436
  result = True
3437

    
3438
  if on_primary or dev.AssembleOnSecondary():
3439
    rstats = lu.rpc.call_blockdev_find(node, dev)
3440
    msg = rstats.fail_msg
3441
    if msg:
3442
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
3443
      result = False
3444
    elif not rstats.payload:
3445
      lu.LogWarning("Can't find disk on node %s", node)
3446
      result = False
3447
    else:
3448
      if ldisk:
3449
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
3450
      else:
3451
        result = result and not rstats.payload.is_degraded
3452

    
3453
  if dev.children:
3454
    for child in dev.children:
3455
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
3456

    
3457
  return result
3458

    
3459

    
3460
class LUOobCommand(NoHooksLU):
3461
  """Logical unit for OOB handling.
3462

3463
  """
3464
  REQ_BGL = False
3465
  _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
3466

    
3467
  def ExpandNames(self):
3468
    """Gather locks we need.
3469

3470
    """
3471
    if self.op.node_names:
3472
      self.op.node_names = _GetWantedNodes(self, self.op.node_names)
3473
      lock_names = self.op.node_names
3474
    else:
3475
      lock_names = locking.ALL_SET
3476

    
3477
    self.needed_locks = {
3478
      locking.LEVEL_NODE: lock_names,
3479
      }
3480

    
3481
  def CheckPrereq(self):
3482
    """Check prerequisites.
3483

3484
    This checks:
3485
     - the node exists in the configuration
3486
     - OOB is supported
3487

3488
    Any errors are signaled by raising errors.OpPrereqError.
3489

3490
    """
3491
    self.nodes = []
3492
    self.master_node = self.cfg.GetMasterNode()
3493

    
3494
    assert self.op.power_delay >= 0.0
3495

    
3496
    if self.op.node_names:
3497
      if (self.op.command in self._SKIP_MASTER and
3498
          self.master_node in self.op.node_names):
3499
        master_node_obj = self.cfg.GetNodeInfo(self.master_node)
3500
        master_oob_handler = _SupportsOob(self.cfg, master_node_obj)
3501

    
3502
        if master_oob_handler:
3503
          additional_text = ("run '%s %s %s' if you want to operate on the"
3504
                             " master regardless") % (master_oob_handler,
3505
                                                      self.op.command,
3506
                                                      self.master_node)
3507
        else:
3508
          additional_text = "it does not support out-of-band operations"
3509

    
3510
        raise errors.OpPrereqError(("Operating on the master node %s is not"
3511
                                    " allowed for %s; %s") %
3512
                                   (self.master_node, self.op.command,
3513
                                    additional_text), errors.ECODE_INVAL)
3514
    else:
3515
      self.op.node_names = self.cfg.GetNodeList()
3516
      if self.op.command in self._SKIP_MASTER:
3517
        self.op.node_names.remove(self.master_node)
3518

    
3519
    if self.op.command in self._SKIP_MASTER:
3520
      assert self.master_node not in self.op.node_names
3521

    
3522
    for node_name in self.op.node_names:
3523
      node = self.cfg.GetNodeInfo(node_name)
3524

    
3525
      if node is None:
3526
        raise errors.OpPrereqError("Node %s not found" % node_name,
3527
                                   errors.ECODE_NOENT)
3528
      else:
3529
        self.nodes.append(node)
3530

    
3531
      if (not self.op.ignore_status and
3532
          (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
3533
        raise errors.OpPrereqError(("Cannot power off node %s because it is"
3534
                                    " not marked offline") % node_name,
3535
                                   errors.ECODE_STATE)
3536

    
3537
  def Exec(self, feedback_fn):
3538
    """Execute OOB and return result if we expect any.
3539

3540
    """
3541
    master_node = self.master_node
3542
    ret = []
3543

    
3544
    for idx, node in enumerate(utils.NiceSort(self.nodes,
3545
                                              key=lambda node: node.name)):
3546
      node_entry = [(constants.RS_NORMAL, node.name)]
3547
      ret.append(node_entry)
3548

    
3549
      oob_program = _SupportsOob(self.cfg, node)
3550

    
3551
      if not oob_program:
3552
        node_entry.append((constants.RS_UNAVAIL, None))
3553
        continue
3554

    
3555
      logging.info("Executing out-of-band command '%s' using '%s' on %s",
3556
                   self.op.command, oob_program, node.name)
3557
      result = self.rpc.call_run_oob(master_node, oob_program,
3558
                                     self.op.command, node.name,
3559
                                     self.op.timeout)
3560

    
3561
      if result.fail_msg:
3562
        self.LogWarning("Out-of-band RPC failed on node '%s': %s",
3563
                        node.name, result.fail_msg)
3564
        node_entry.append((constants.RS_NODATA, None))
3565
      else:
3566
        try:
3567
          self._CheckPayload(result)
3568
        except errors.OpExecError, err:
3569
          self.LogWarning("Payload returned by node '%s' is not valid: %s",
3570
                          node.name, err)
3571
          node_entry.append((constants.RS_NODATA, None))
3572
        else:
3573
          if self.op.command == constants.OOB_HEALTH:
3574
            # For health we should log important events
3575
            for item, status in result.payload:
3576
              if status in [constants.OOB_STATUS_WARNING,
3577
                            constants.OOB_STATUS_CRITICAL]:
3578
                self.LogWarning("Item '%s' on node '%s' has status '%s'",
3579
                                item, node.name, status)
3580

    
3581
          if self.op.command == constants.OOB_POWER_ON:
3582
            node.powered = True
3583
          elif self.op.command == constants.OOB_POWER_OFF:
3584
            node.powered = False
3585
          elif self.op.command == constants.OOB_POWER_STATUS:
3586
            powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
3587
            if powered != node.powered:
3588
              logging.warning(("Recorded power state (%s) of node '%s' does not"
3589
                               " match actual power state (%s)"), node.powered,
3590
                              node.name, powered)
3591

    
3592
          # For configuration changing commands we should update the node
3593
          if self.op.command in (constants.OOB_POWER_ON,
3594
                                 constants.OOB_POWER_OFF):
3595
            self.cfg.Update(node, feedback_fn)
3596

    
3597
          node_entry.append((constants.RS_NORMAL, result.payload))
3598

    
3599
          if (self.op.command == constants.OOB_POWER_ON and
3600
              idx < len(self.nodes) - 1):
3601
            time.sleep(self.op.power_delay)
3602

    
3603
    return ret
3604

    
3605
  def _CheckPayload(self, result):
3606
    """Checks if the payload is valid.
3607

3608
    @param result: RPC result
3609
    @raises errors.OpExecError: If payload is not valid
3610

3611
    """
3612
    errs = []
3613
    if self.op.command == constants.OOB_HEALTH:
3614
      if not isinstance(result.payload, list):
3615
        errs.append("command 'health' is expected to return a list but got %s" %
3616
                    type(result.payload))
3617
      else:
3618
        for item, status in result.payload:
3619
          if status not in constants.OOB_STATUSES:
3620
            errs.append("health item '%s' has invalid status '%s'" %
3621
                        (item, status))
3622

    
3623
    if self.op.command == constants.OOB_POWER_STATUS:
3624
      if not isinstance(result.payload, dict):
3625
        errs.append("power-status is expected to return a dict but got %s" %
3626
                    type(result.payload))
3627

    
3628
    if self.op.command in [
3629
        constants.OOB_POWER_ON,
3630
        constants.OOB_POWER_OFF,
3631
        constants.OOB_POWER_CYCLE,
3632
        ]:
3633
      if result.payload is not None:
3634
        errs.append("%s is expected to not return payload but got '%s'" %
3635
                    (self.op.command, result.payload))
3636

    
3637
    if errs:
3638
      raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
3639
                               utils.CommaJoin(errs))
3640

    
3641
class _OsQuery(_QueryBase):
3642
  FIELDS = query.OS_FIELDS
3643

    
3644
  def ExpandNames(self, lu):
3645
    # Lock all nodes in shared mode
3646
    # Temporary removal of locks, should be reverted later
3647
    # TODO: reintroduce locks when they are lighter-weight
3648
    lu.needed_locks = {}
3649
    #self.share_locks[locking.LEVEL_NODE] = 1
3650
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3651

    
3652
    # The following variables interact with _QueryBase._GetNames
3653
    if self.names:
3654
      self.wanted = self.names
3655
    else:
3656
      self.wanted = locking.ALL_SET
3657

    
3658
    self.do_locking = self.use_locking
3659

    
3660
  def DeclareLocks(self, lu, level):
3661
    pass
3662

    
3663
  @staticmethod
3664
  def _DiagnoseByOS(rlist):
3665
    """Remaps a per-node return list into an a per-os per-node dictionary
3666

3667
    @param rlist: a map with node names as keys and OS objects as values
3668

3669
    @rtype: dict
3670
    @return: a dictionary with osnames as keys and as value another
3671
        map, with nodes as keys and tuples of (path, status, diagnose,
3672
        variants, parameters, api_versions) as values, eg::
3673

3674
          {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
3675
                                     (/srv/..., False, "invalid api")],
3676
                           "node2": [(/srv/..., True, "", [], [])]}
3677
          }
3678

3679
    """
3680
    all_os = {}
3681
    # we build here the list of nodes that didn't fail the RPC (at RPC
3682
    # level), so that nodes with a non-responding node daemon don't
3683
    # make all OSes invalid
3684
    good_nodes = [node_name for node_name in rlist
3685
                  if not rlist[node_name].fail_msg]
3686
    for node_name, nr in rlist.items():
3687
      if nr.fail_msg or not nr.payload:
3688
        continue
3689
      for (name, path, status, diagnose, variants,
3690
           params, api_versions) in nr.payload:
3691
        if name not in all_os:
3692
          # build a list of nodes for this os containing empty lists
3693
          # for each node in node_list
3694
          all_os[name] = {}
3695
          for nname in good_nodes:
3696
            all_os[name][nname] = []
3697
        # convert params from [name, help] to (name, help)
3698
        params = [tuple(v) for v in params]
3699
        all_os[name][node_name].append((path, status, diagnose,
3700
                                        variants, params, api_versions))
3701
    return all_os
3702

    
3703
  def _GetQueryData(self, lu):
3704
    """Computes the list of nodes and their attributes.
3705

3706
    """
3707
    # Locking is not used
3708
    assert not (compat.any(lu.glm.is_owned(level)
3709
                           for level in locking.LEVELS
3710
                           if level != locking.LEVEL_CLUSTER) or
3711
                self.do_locking or self.use_locking)
3712

    
3713
    valid_nodes = [node.name
3714
                   for node in lu.cfg.GetAllNodesInfo().values()
3715
                   if not node.offline and node.vm_capable]
3716
    pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_nodes))
3717
    cluster = lu.cfg.GetClusterInfo()
3718

    
3719
    data = {}
3720

    
3721
    for (os_name, os_data) in pol.items():
3722
      info = query.OsInfo(name=os_name, valid=True, node_status=os_data,
3723
                          hidden=(os_name in cluster.hidden_os),
3724
                          blacklisted=(os_name in cluster.blacklisted_os))
3725

    
3726
      variants = set()
3727
      parameters = set()
3728
      api_versions = set()
3729

    
3730
      for idx, osl in enumerate(os_data.values()):
3731
        info.valid = bool(info.valid and osl and osl[0][1])
3732
        if not info.valid:
3733
          break
3734

    
3735
        (node_variants, node_params, node_api) = osl[0][3:6]
3736
        if idx == 0:
3737
          # First entry
3738
          variants.update(node_variants)
3739
          parameters.update(node_params)
3740
          api_versions.update(node_api)
3741
        else:
3742
          # Filter out inconsistent values
3743
          variants.intersection_update(node_variants)
3744
          parameters.intersection_update(node_params)
3745
          api_versions.intersection_update(node_api)
3746

    
3747
      info.variants = list(variants)
3748
      info.parameters = list(parameters)
3749
      info.api_versions = list(api_versions)
3750

    
3751
      data[os_name] = info
3752

    
3753
    # Prepare data in requested order
3754
    return [data[name] for name in self._GetNames(lu, pol.keys(), None)
3755
            if name in data]
3756

    
3757

    
3758
class LUOsDiagnose(NoHooksLU):
3759
  """Logical unit for OS diagnose/query.
3760

3761
  """
3762
  REQ_BGL = False
3763

    
3764
  @staticmethod
3765
  def _BuildFilter(fields, names):
3766
    """Builds a filter for querying OSes.
3767

3768
    """
3769
    name_filter = qlang.MakeSimpleFilter("name", names)
3770

    
3771
    # Legacy behaviour: Hide hidden, blacklisted or invalid OSes if the
3772
    # respective field is not requested
3773
    status_filter = [[qlang.OP_NOT, [qlang.OP_TRUE, fname]]
3774
                     for fname in ["hidden", "blacklisted"]
3775
                     if fname not in fields]
3776
    if "valid" not in fields:
3777
      status_filter.append([qlang.OP_TRUE, "valid"])
3778

    
3779
    if status_filter:
3780
      status_filter.insert(0, qlang.OP_AND)
3781
    else:
3782
      status_filter = None
3783

    
3784
    if name_filter and status_filter:
3785
      return [qlang.OP_AND, name_filter, status_filter]
3786
    elif name_filter:
3787
      return name_filter
3788
    else:
3789
      return status_filter
3790

    
3791
  def CheckArguments(self):
3792
    self.oq = _OsQuery(self._BuildFilter(self.op.output_fields, self.op.names),
3793
                       self.op.output_fields, False)
3794

    
3795
  def ExpandNames(self):
3796
    self.oq.ExpandNames(self)
3797

    
3798
  def Exec(self, feedback_fn):
3799
    return self.oq.OldStyleQuery(self)
3800

    
3801

    
3802
class LUNodeRemove(LogicalUnit):
3803
  """Logical unit for removing a node.
3804

3805
  """
3806
  HPATH = "node-remove"
3807
  HTYPE = constants.HTYPE_NODE
3808

    
3809
  def BuildHooksEnv(self):
3810
    """Build hooks env.
3811

3812
    This doesn't run on the target node in the pre phase as a failed
3813
    node would then be impossible to remove.
3814

3815
    """
3816
    return {
3817
      "OP_TARGET": self.op.node_name,
3818
      "NODE_NAME": self.op.node_name,
3819
      }
3820

    
3821
  def BuildHooksNodes(self):
3822
    """Build hooks nodes.
3823

3824
    """
3825
    all_nodes = self.cfg.GetNodeList()
3826
    try:
3827
      all_nodes.remove(self.op.node_name)
3828
    except ValueError:
3829
      logging.warning("Node '%s', which is about to be removed, was not found"
3830
                      " in the list of all nodes", self.op.node_name)
3831
    return (all_nodes, all_nodes)
3832

    
3833
  def CheckPrereq(self):
3834
    """Check prerequisites.
3835

3836
    This checks:
3837
     - the node exists in the configuration
3838
     - it does not have primary or secondary instances
3839
     - it's not the master
3840

3841
    Any errors are signaled by raising errors.OpPrereqError.
3842

3843
    """
3844
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3845
    node = self.cfg.GetNodeInfo(self.op.node_name)
3846
    assert node is not None
3847

    
3848
    instance_list = self.cfg.GetInstanceList()
3849

    
3850
    masternode = self.cfg.GetMasterNode()
3851
    if node.name == masternode:
3852
      raise errors.OpPrereqError("Node is the master node, failover to another"
3853
                                 " node is required", errors.ECODE_INVAL)
3854

    
3855
    for instance_name in instance_list:
3856
      instance = self.cfg.GetInstanceInfo(instance_name)
3857
      if node.name in instance.all_nodes:
3858
        raise errors.OpPrereqError("Instance %s is still running on the node,"
3859
                                   " please remove first" % instance_name,
3860
                                   errors.ECODE_INVAL)
3861
    self.op.node_name = node.name
3862
    self.node = node
3863

    
3864
  def Exec(self, feedback_fn):
3865
    """Removes the node from the cluster.
3866

3867
    """
3868
    node = self.node
3869
    logging.info("Stopping the node daemon and removing configs from node %s",
3870
                 node.name)
3871

    
3872
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
3873

    
3874
    # Promote nodes to master candidate as needed
3875
    _AdjustCandidatePool(self, exceptions=[node.name])
3876
    self.context.RemoveNode(node.name)
3877

    
3878
    # Run post hooks on the node before it's removed
3879
    _RunPostHook(self, node.name)
3880

    
3881
    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
3882
    msg = result.fail_msg
3883
    if msg:
3884
      self.LogWarning("Errors encountered on the remote node while leaving"
3885
                      " the cluster: %s", msg)
3886

    
3887
    # Remove node from our /etc/hosts
3888
    if self.cfg.GetClusterInfo().modify_etc_hosts:
3889
      master_node = self.cfg.GetMasterNode()
3890
      result = self.rpc.call_etc_hosts_modify(master_node,
3891
                                              constants.ETC_HOSTS_REMOVE,
3892
                                              node.name, None)
3893
      result.Raise("Can't update hosts file with new host data")
3894
      _RedistributeAncillaryFiles(self)
3895

    
3896

    
3897
class _NodeQuery(_QueryBase):
3898
  FIELDS = query.NODE_FIELDS
3899

    
3900
  def ExpandNames(self, lu):
3901
    lu.needed_locks = {}
3902
    lu.share_locks[locking.LEVEL_NODE] = 1
3903

    
3904
    if self.names:
3905
      self.wanted = _GetWantedNodes(lu, self.names)
3906
    else:
3907
      self.wanted = locking.ALL_SET
3908

    
3909
    self.do_locking = (self.use_locking and
3910
                       query.NQ_LIVE in self.requested_data)
3911

    
3912
    if self.do_locking:
3913
      # if we don't request only static fields, we need to lock the nodes
3914
      lu.needed_locks[locking.LEVEL_NODE] = self.wanted
3915

    
3916
  def DeclareLocks(self, lu, level):
3917
    pass
3918

    
3919
  def _GetQueryData(self, lu):
3920
    """Computes the list of nodes and their attributes.
3921

3922
    """
3923
    all_info = lu.cfg.GetAllNodesInfo()
3924

    
3925
    nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
3926

    
3927
    # Gather data as requested
3928
    if query.NQ_LIVE in self.requested_data:
3929
      # filter out non-vm_capable nodes
3930
      toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
3931

    
3932
      node_data = lu.rpc.call_node_info(toquery_nodes, lu.cfg.GetVGName(),
3933
                                        lu.cfg.GetHypervisorType())
3934
      live_data = dict((name, nresult.payload)
3935
                       for (name, nresult) in node_data.items()
3936
                       if not nresult.fail_msg and nresult.payload)
3937
    else:
3938
      live_data = None
3939

    
3940
    if query.NQ_INST in self.requested_data:
3941
      node_to_primary = dict([(name, set()) for name in nodenames])
3942
      node_to_secondary = dict([(name, set()) for name in nodenames])
3943

    
3944
      inst_data = lu.cfg.GetAllInstancesInfo()
3945

    
3946
      for inst in inst_data.values():
3947
        if inst.primary_node in node_to_primary:
3948
          node_to_primary[inst.primary_node].add(inst.name)
3949
        for secnode in inst.secondary_nodes:
3950
          if secnode in node_to_secondary:
3951
            node_to_secondary[secnode].add(inst.name)
3952
    else:
3953
      node_to_primary = None
3954
      node_to_secondary = None
3955

    
3956
    if query.NQ_OOB in self.requested_data:
3957
      oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
3958
                         for name, node in all_info.iteritems())
3959
    else:
3960
      oob_support = None
3961

    
3962
    if query.NQ_GROUP in self.requested_data:
3963
      groups = lu.cfg.GetAllNodeGroupsInfo()
3964
    else:
3965
      groups = {}
3966

    
3967
    return query.NodeQueryData([all_info[name] for name in nodenames],
3968
                               live_data, lu.cfg.GetMasterNode(),
3969
                               node_to_primary, node_to_secondary, groups,
3970
                               oob_support, lu.cfg.GetClusterInfo())
3971

    
3972

    
3973
class LUNodeQuery(NoHooksLU):
  """Logical unit for querying nodes.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
                         self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.nq.ExpandNames(self)

  def Exec(self, feedback_fn):
    return self.nq.OldStyleQuery(self)


class LUNodeQueryvols(NoHooksLU):
3992
  """Logical unit for getting volumes on node(s).
3993

3994
  """
3995
  REQ_BGL = False
3996
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
3997
  _FIELDS_STATIC = utils.FieldSet("node")
3998

    
3999
  def CheckArguments(self):
4000
    _CheckOutputFields(static=self._FIELDS_STATIC,
4001
                       dynamic=self._FIELDS_DYNAMIC,
4002
                       selected=self.op.output_fields)
4003

    
4004
  def ExpandNames(self):
4005
    self.needed_locks = {}
4006
    self.share_locks[locking.LEVEL_NODE] = 1
4007
    if not self.op.nodes:
4008
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4009
    else:
4010
      self.needed_locks[locking.LEVEL_NODE] = \
4011
        _GetWantedNodes(self, self.op.nodes)
4012

    
4013
  def Exec(self, feedback_fn):
4014
    """Computes the list of nodes and their attributes.
4015

4016
    """
4017
    nodenames = self.glm.list_owned(locking.LEVEL_NODE)
4018
    volumes = self.rpc.call_node_volumes(nodenames)
4019

    
4020
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
4021
             in self.cfg.GetInstanceList()]
4022

    
4023
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
4024

    
4025
    output = []
4026
    for node in nodenames:
4027
      nresult = volumes[node]
4028
      if nresult.offline:
4029
        continue
4030
      msg = nresult.fail_msg
4031
      if msg:
4032
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
4033
        continue
4034

    
4035
      node_vols = nresult.payload[:]
4036
      node_vols.sort(key=lambda vol: vol['dev'])
4037

    
4038
      for vol in node_vols:
4039
        node_output = []
4040
        for field in self.op.output_fields:
4041
          if field == "node":
4042
            val = node
4043
          elif field == "phys":
4044
            val = vol['dev']
4045
          elif field == "vg":
4046
            val = vol['vg']
4047
          elif field == "name":
4048
            val = vol['name']
4049
          elif field == "size":
4050
            val = int(float(vol['size']))
4051
          elif field == "instance":
4052
            for inst in ilist:
4053
              if node not in lv_by_node[inst]:
4054
                continue
4055
              if vol['name'] in lv_by_node[inst][node]:
4056
                val = inst.name
4057
                break
4058
            else:
4059
              val = '-'
4060
          else:
4061
            raise errors.ParameterError(field)
4062
          node_output.append(str(val))
4063

    
4064
        output.append(node_output)
4065

    
4066
    return output
4067

    
4068

    
4069
class LUNodeQueryStorage(NoHooksLU):
4070
  """Logical unit for getting information on storage units on node(s).
4071

4072
  """
4073
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
4074
  REQ_BGL = False
4075

    
4076
  def CheckArguments(self):
4077
    _CheckOutputFields(static=self._FIELDS_STATIC,
4078
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
4079
                       selected=self.op.output_fields)
4080

    
4081
  def ExpandNames(self):
4082
    self.needed_locks = {}
4083
    self.share_locks[locking.LEVEL_NODE] = 1
4084

    
4085
    if self.op.nodes:
4086
      self.needed_locks[locking.LEVEL_NODE] = \
4087
        _GetWantedNodes(self, self.op.nodes)
4088
    else:
4089
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4090

    
4091
  def Exec(self, feedback_fn):
4092
    """Computes the list of nodes and their attributes.
4093

4094
    """
4095
    self.nodes = self.glm.list_owned(locking.LEVEL_NODE)
4096

    
4097
    # Always get name to sort by
4098
    if constants.SF_NAME in self.op.output_fields:
4099
      fields = self.op.output_fields[:]
4100
    else:
4101
      fields = [constants.SF_NAME] + self.op.output_fields
4102

    
4103
    # Never ask for node or type as it's only known to the LU
4104
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
4105
      while extra in fields:
4106
        fields.remove(extra)
4107

    
4108
    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
4109
    name_idx = field_idx[constants.SF_NAME]
4110

    
4111
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
4112
    data = self.rpc.call_storage_list(self.nodes,
4113
                                      self.op.storage_type, st_args,
4114
                                      self.op.name, fields)
4115

    
4116
    result = []
4117

    
4118
    for node in utils.NiceSort(self.nodes):
4119
      nresult = data[node]
4120
      if nresult.offline:
4121
        continue
4122

    
4123
      msg = nresult.fail_msg
4124
      if msg:
4125
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
4126
        continue
4127

    
4128
      rows = dict([(row[name_idx], row) for row in nresult.payload])
4129

    
4130
      for name in utils.NiceSort(rows.keys()):
4131
        row = rows[name]
4132

    
4133
        out = []
4134

    
4135
        for field in self.op.output_fields:
4136
          if field == constants.SF_NODE:
4137
            val = node
4138
          elif field == constants.SF_TYPE:
4139
            val = self.op.storage_type
4140
          elif field in field_idx:
4141
            val = row[field_idx[field]]
4142
          else:
4143
            raise errors.ParameterError(field)
4144

    
4145
          out.append(val)
4146

    
4147
        result.append(out)
4148

    
4149
    return result
4150

    
4151

    
4152
class _InstanceQuery(_QueryBase):
4153
  FIELDS = query.INSTANCE_FIELDS
4154

    
4155
  def ExpandNames(self, lu):
4156
    lu.needed_locks = {}
4157
    lu.share_locks[locking.LEVEL_INSTANCE] = 1
4158
    lu.share_locks[locking.LEVEL_NODE] = 1
4159

    
4160
    if self.names:
4161
      self.wanted = _GetWantedInstances(lu, self.names)
4162
    else:
4163
      self.wanted = locking.ALL_SET
4164

    
4165
    self.do_locking = (self.use_locking and
4166
                       query.IQ_LIVE in self.requested_data)
4167
    if self.do_locking:
4168
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
4169
      lu.needed_locks[locking.LEVEL_NODE] = []
4170
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4171

    
4172
  def DeclareLocks(self, lu, level):
4173
    if level == locking.LEVEL_NODE and self.do_locking:
4174
      lu._LockInstancesNodes() # pylint: disable-msg=W0212
4175

    
4176
  def _GetQueryData(self, lu):
4177
    """Computes the list of instances and their attributes.
4178

4179
    """
4180
    cluster = lu.cfg.GetClusterInfo()
4181
    all_info = lu.cfg.GetAllInstancesInfo()
4182

    
4183
    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
4184

    
4185
    instance_list = [all_info[name] for name in instance_names]
4186
    nodes = frozenset(itertools.chain(*(inst.all_nodes
4187
                                        for inst in instance_list)))
4188
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
4189
    bad_nodes = []
4190
    offline_nodes = []
4191
    wrongnode_inst = set()
4192

    
4193
    # Gather data as requested
4194
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
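      # live instance data is needed both for the "live" fields and to
      # decide whether console information can be computed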
4195
      live_data = {}
4196
      node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
4197
      for name in nodes:
4198
        result = node_data[name]
4199
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_nodes.append(name)
        if result.fail_msg:
          bad_nodes.append(name)
        elif result.payload:
          for inst in result.payload:
            if inst in all_info:
              if all_info[inst].primary_node == name:
                live_data.update(result.payload)
              else:
                wrongnode_inst.add(inst)
            else:
              # orphan instance; we don't list it here as we don't
              # handle this case yet in the output of instance listing
              logging.warning("Orphan instance '%s' found on node %s",
                              inst, name)
        # else no instance is alive
    else:
      live_data = {}

    if query.IQ_DISKUSAGE in self.requested_data:
      disk_usage = dict((inst.name,
                         _ComputeDiskSize(inst.disk_template,
                                          [{constants.IDISK_SIZE: disk.size}
                                           for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.name in live_data:
          # Instance is running
          consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
        else:
          consinfo[inst.name] = None
      assert set(consinfo.keys()) == set(instance_names)
    else:
      consinfo = None

    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_nodes, bad_nodes,
                                   live_data, wrongnode_inst, consinfo)


class LUQuery(NoHooksLU):
  """Query for resources/items of a certain kind.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    qcls = _GetQueryImplementation(self.op.what)

    self.impl = qcls(self.op.filter, self.op.fields, False)

  def ExpandNames(self):
    self.impl.ExpandNames(self)

  def DeclareLocks(self, level):
    self.impl.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.impl.NewStyleQuery(self)


class LUQueryFields(NoHooksLU):
  """Query for resources/items of a certain kind.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.qcls = _GetQueryImplementation(self.op.what)

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    return query.QueryFields(self.qcls.FIELDS, self.op.fields)


class LUNodeModifyStorage(NoHooksLU):
  """Logical unit for modifying a storage volume on a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type

    try:
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
    except KeyError:
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " modified" % storage_type,
                                 errors.ECODE_INVAL)

    diff = set(self.op.changes.keys()) - modifiable
    if diff:
      raise errors.OpPrereqError("The following fields can not be modified for"
                                 " storage units of type '%s': %r" %
                                 (storage_type, list(diff)),
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_name,
      }

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_modify(self.op.node_name,
                                          self.op.storage_type, st_args,
                                          self.op.name, self.op.changes)
    result.Raise("Failed to modify storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))


class LUNodeAdd(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _NFLAGS = ["master_capable", "vm_capable"]

  def CheckArguments(self):
    self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
    # validate/normalize the node name
    self.hostname = netutils.GetHostname(name=self.op.node_name,
                                         family=self.primary_ip_family)
    self.op.node_name = self.hostname.name

    if self.op.readd and self.op.node_name == self.cfg.GetMasterNode():
      raise errors.OpPrereqError("Cannot readd the master node",
                                 errors.ECODE_STATE)

    if self.op.readd and self.op.group:
      raise errors.OpPrereqError("Cannot pass a node group when a node is"
                                 " being readded", errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    return {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      "MASTER_CAPABLE": str(self.op.master_capable),
      "VM_CAPABLE": str(self.op.vm_capable),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    # Exclude added node
    pre_nodes = list(set(self.cfg.GetNodeList()) - set([self.op.node_name]))
    post_nodes = pre_nodes + [self.op.node_name, ]

    return (pre_nodes, post_nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signaled by raising errors.OpPrereqError.

    """
    cfg = self.cfg
    hostname = self.hostname
    node = hostname.name
    primary_ip = self.op.primary_ip = hostname.ip
    if self.op.secondary_ip is None:
      if self.primary_ip_family == netutils.IP6Address.family:
        raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
                                   " IPv4 address must be given as secondary",
                                   errors.ECODE_INVAL)
      self.op.secondary_ip = primary_ip

    secondary_ip = self.op.secondary_ip
    if not netutils.IP4Address.IsValid(secondary_ip):
      raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
                                 " address" % secondary_ip, errors.ECODE_INVAL)

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node, errors.ECODE_EXISTS)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
                                 errors.ECODE_NOENT)

    self.changed_primary_ip = False

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if existing_node.secondary_ip != secondary_ip:
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before",
                                     errors.ECODE_INVAL)
        if existing_node.primary_ip != primary_ip:
          self.changed_primary_ip = True

        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name,
                                   errors.ECODE_NOTUNIQUE)

    # After this 'if' block, None is no longer a valid value for the
    # _capable op attributes
    if self.op.readd:
      old_node = self.cfg.GetNodeInfo(node)
      assert old_node is not None, "Can't retrieve locked node %s" % node
      for attr in self._NFLAGS:
        if getattr(self.op, attr) is None:
          setattr(self.op, attr, getattr(old_node, attr))
    else:
      for attr in self._NFLAGS:
        if getattr(self.op, attr) is None:
          setattr(self.op, attr, True)

    if self.op.readd and not self.op.vm_capable:
      pri, sec = cfg.GetNodeInstances(node)
      if pri or sec:
        raise errors.OpPrereqError("Node %s being re-added with vm_capable"
                                   " flag set to false, but it already holds"
                                   " instances" % node,
                                   errors.ECODE_STATE)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no secondary ip but the"
                                   " new node has one",
                                   errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("The master has a secondary ip but the"
                                   " new node doesn't have one",
                                   errors.ECODE_INVAL)

    # checks reachability
    if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping",
                                 errors.ECODE_ENVIRON)

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to node daemon port",
                                   errors.ECODE_ENVIRON)

    if self.op.readd:
      exceptions = [node]
    else:
      exceptions = []

    if self.op.master_capable:
      self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
    else:
      self.master_candidate = False

    if self.op.readd:
      self.new_node = old_node
    else:
      node_group = cfg.LookupNodeGroup(self.op.group)
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False,
                                   group=node_group)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # We are adding a new node, so we assume it's powered
    new_node.powered = True

    # for re-adds, reset the offline/drained/master-candidate flags;
    # we need to reset here, otherwise offline would prevent RPC calls
    # later in the procedure; this also means that if the re-add
    # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
      self.LogInfo("Readding a node, the offline/drained flags were reset")
      # if we demote the node, we do cleanup later in the procedure
      new_node.master_candidate = self.master_candidate
      if self.changed_primary_ip:
        new_node.primary_ip = self.op.primary_ip

    # copy the master/vm_capable flags
    for attr in self._NFLAGS:
      setattr(new_node, attr, getattr(self.op, attr))

    # notify the user about any possible mc promotion
    if new_node.master_candidate:
      self.LogInfo("Node will be a master candidate")

    if self.op.ndparams:
      new_node.ndparams = self.op.ndparams
    else:
      new_node.ndparams = {}

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise("Can't get version information from node %s" % node)
    if constants.PROTOCOL_VERSION == result.payload:
      logging.info("Communication to node %s fine, sw version %s match",
                   node, result.payload)
    else:
      raise errors.OpExecError("Version mismatch master version %s,"
                               " node version %s" %
                               (constants.PROTOCOL_VERSION, result.payload))

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      master_node = self.cfg.GetMasterNode()
      result = self.rpc.call_etc_hosts_modify(master_node,
                                              constants.ETC_HOSTS_ADD,
                                              self.hostname.name,
                                              self.hostname.ip)
      result.Raise("Can't update hosts file with new host data")

    if new_node.secondary_ip != new_node.primary_ip:
      _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
                               False)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      constants.NV_NODELIST: [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
      if nl_payload:
        for failed in nl_payload:
          feedback_fn("ssh/hostname verification failed"
                      " (checking from %s): %s" %
                      (verifier, nl_payload[failed]))
        raise errors.OpExecError("ssh/hostname verification failed")

    if self.op.readd:
      _RedistributeAncillaryFiles(self)
      self.context.ReaddNode(new_node)
      # make sure we redistribute the config
      self.cfg.Update(new_node, feedback_fn)
      # and make sure the new node will not have old files around
      if not new_node.master_candidate:
        result = self.rpc.call_node_demote_from_mc(new_node.name)
        msg = result.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself from master"
                          " candidate status: %s" % msg)
    else:
      _RedistributeAncillaryFiles(self, additional_nodes=[node],
                                  additional_vm=self.op.vm_capable)
      self.context.AddNode(new_node, self.proc.GetECId())


class LUNodeSetParams(LogicalUnit):
  """Modifies the parameters of a node.

  @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
      to the node role (as _ROLE_*)
  @cvar _R2F: a dictionary from node role to tuples of flags
  @cvar _FLAGS: a list of attribute names corresponding to the flags

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  REQ_BGL = False
  (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
  _F2R = {
    (True, False, False): _ROLE_CANDIDATE,
    (False, True, False): _ROLE_DRAINED,
    (False, False, True): _ROLE_OFFLINE,
    (False, False, False): _ROLE_REGULAR,
    }
  _R2F = dict((v, k) for k, v in _F2R.items())
  _FLAGS = ["master_candidate", "drained", "offline"]

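  # Editor's note (illustrative, not in the original source): _F2R and _R2F
  # are inverses of each other, and the tuple positions correspond to _FLAGS,
  # i.e. (master_candidate, drained, offline).  For example:
  #   _F2R[(True, False, False)] -> _ROLE_CANDIDATE
  #   _R2F[_ROLE_OFFLINE]        -> (False, False, True)
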
  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
                self.op.master_capable, self.op.vm_capable,
                self.op.secondary_ip, self.op.ndparams]
    if all_mods.count(None) == len(all_mods):
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)
    if all_mods.count(True) > 1:
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time",
                                 errors.ECODE_INVAL)

    # Boolean value that tells us whether we might be demoting from MC
    self.might_demote = (self.op.master_candidate == False or
                         self.op.offline == True or
                         self.op.drained == True or
                         self.op.master_capable == False)

    if self.op.secondary_ip:
      if not netutils.IP4Address.IsValid(self.op.secondary_ip):
        raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
                                   " address" % self.op.secondary_ip,
                                   errors.ECODE_INVAL)

    self.lock_all = self.op.auto_promote and self.might_demote
    self.lock_instances = self.op.secondary_ip is not None

  def ExpandNames(self):
    if self.lock_all:
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    else:
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

    if self.lock_instances:
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

  def DeclareLocks(self, level):
    # If we have locked all instances, before waiting to lock nodes, release
    # all the ones living on nodes unrelated to the current operation.
    if level == locking.LEVEL_NODE and self.lock_instances:
      self.affected_instances = []
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
        instances_keep = []

        # Build list of instances to release
        for instance_name in self.glm.list_owned(locking.LEVEL_INSTANCE):
          instance = self.context.cfg.GetInstanceInfo(instance_name)
          if (instance.disk_template in constants.DTS_INT_MIRROR and
              self.op.node_name in instance.all_nodes):
            instances_keep.append(instance_name)
            self.affected_instances.append(instance)

        _ReleaseLocks(self, locking.LEVEL_INSTANCE, keep=instances_keep)

        assert (set(self.glm.list_owned(locking.LEVEL_INSTANCE)) ==
                set(instances_keep))

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    return {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      "MASTER_CAPABLE": str(self.op.master_capable),
      "VM_CAPABLE": str(self.op.vm_capable),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.node_name]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if (self.op.master_candidate is not None or
        self.op.drained is not None or
        self.op.offline is not None):
      # we can't change the master's node flags
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master role can be changed"
                                   " only via master-failover",
                                   errors.ECODE_INVAL)

    if self.op.master_candidate and not node.master_capable:
      raise errors.OpPrereqError("Node %s is not master capable, cannot make"
                                 " it a master candidate" % node.name,
                                 errors.ECODE_STATE)

    if self.op.vm_capable == False:
      (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
      if ipri or isec:
        raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
                                   " the vm_capable flag" % node.name,
                                   errors.ECODE_STATE)

    if node.master_candidate and self.might_demote and not self.lock_all:
      assert not self.op.auto_promote, "auto_promote set but lock_all not"
      # check if after removing the current node, we're missing master
      # candidates
      (mc_remaining, mc_should, _) = \
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
      if mc_remaining < mc_should:
        raise errors.OpPrereqError("Not enough master candidates, please"
                                   " pass auto promote option to allow"
                                   " promotion", errors.ECODE_STATE)

    self.old_flags = old_flags = (node.master_candidate,
                                  node.drained, node.offline)
    assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
    self.old_role = old_role = self._F2R[old_flags]

    # Check for ineffective changes
    for attr in self._FLAGS:
      if (getattr(self.op, attr) == False and getattr(node, attr) == False):
        self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
        setattr(self.op, attr, None)

    # Past this point, any flag change to False means a transition
    # away from the respective state, as only real changes are kept

    # TODO: We might query the real power state if it supports OOB
    if _SupportsOob(self.cfg, node):
      if self.op.offline is False and not (node.powered or
                                           self.op.powered == True):
        raise errors.OpPrereqError(("Node %s needs to be turned on before its"
                                    " offline status can be reset") %
                                   self.op.node_name)
    elif self.op.powered is not None:
      raise errors.OpPrereqError(("Unable to change powered state for node %s"
                                  " as it does not support out-of-band"
                                  " handling") % self.op.node_name)

    # If we're being deofflined/drained, we'll MC ourself if needed
    if (self.op.drained == False or self.op.offline == False or
        (self.op.master_capable and not node.master_capable)):
      if _DecideSelfPromotion(self):
        self.op.master_candidate = True
        self.LogInfo("Auto-promoting node to master candidate")

    # If we're no longer master capable, we'll demote ourselves from MC
    if self.op.master_capable == False and node.master_candidate:
      self.LogInfo("Demoting from master candidate")
      self.op.master_candidate = False

    # Compute new role
    assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
    if self.op.master_candidate:
      new_role = self._ROLE_CANDIDATE
    elif self.op.drained:
      new_role = self._ROLE_DRAINED
    elif self.op.offline:
      new_role = self._ROLE_OFFLINE
    elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
      # False is still in new flags, which means we're un-setting (the
      # only) True flag
      new_role = self._ROLE_REGULAR
    else: # no new flags, nothing, keep old role
      new_role = old_role

    self.new_role = new_role

    if old_role == self._ROLE_OFFLINE and new_role != old_role:
      # Trying to transition out of offline status
      result = self.rpc.call_version([node.name])[node.name]
      if result.fail_msg:
        raise errors.OpPrereqError("Node %s is being de-offlined but fails"
                                   " to report its version: %s" %
                                   (node.name, result.fail_msg),
                                   errors.ECODE_STATE)
      else:
        self.LogWarning("Transitioning node from offline to online state"
                        " without using re-add. Please make sure the node"
                        " is healthy!")

    if self.op.secondary_ip:
      # Ok even without locking, because this can't be changed by any LU
      master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
      master_singlehomed = master.secondary_ip == master.primary_ip
      if master_singlehomed and self.op.secondary_ip:
        raise errors.OpPrereqError("Cannot change the secondary ip on a single"
                                   " homed cluster", errors.ECODE_INVAL)

      if node.offline:
        if self.affected_instances:
          raise errors.OpPrereqError("Cannot change secondary ip: offline"
                                     " node has instances (%s) configured"
                                     " to use it" % self.affected_instances)
      else:
        # On online nodes, check that no instances are running, and that
        # the node has the new ip and we can reach it.
        for instance in self.affected_instances:
          _CheckInstanceDown(self, instance, "cannot change secondary ip")

        _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
        if master.name != node.name:
          # check reachability from master secondary ip to new secondary ip
          if not netutils.TcpPing(self.op.secondary_ip,
                                  constants.DEFAULT_NODED_PORT,
                                  source=master.secondary_ip):
            raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                       " based ping to node daemon port",
                                       errors.ECODE_ENVIRON)

    if self.op.ndparams:
      new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = new_ndparams

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node
    old_role = self.old_role
    new_role = self.new_role

    result = []

    if self.op.ndparams:
      node.ndparams = self.new_ndparams

    if self.op.powered is not None:
      node.powered = self.op.powered

    for attr in ["master_capable", "vm_capable"]:
      val = getattr(self.op, attr)
      if val is not None:
        setattr(node, attr, val)
        result.append((attr, str(val)))

    if new_role != old_role:
      # Tell the node to demote itself, if no longer MC and not offline
      if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
        msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself: %s", msg)

      new_flags = self._R2F[new_role]
      for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
        if of != nf:
          result.append((desc, str(nf)))
      (node.master_candidate, node.drained, node.offline) = new_flags

      # we locked all nodes, we adjust the CP before updating this node
      if self.lock_all:
        _AdjustCandidatePool(self, [node.name])

    if self.op.secondary_ip:
      node.secondary_ip = self.op.secondary_ip
      result.append(("secondary_ip", self.op.secondary_ip))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node, feedback_fn)

    # this will trigger job queue propagation or cleanup if the mc
    # flag changed
    if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
      self.context.ReaddNode(node)

    return result


class LUNodePowercycle(NoHooksLU):
  """Powercycles a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
      raise errors.OpPrereqError("The node is the master and the force"
                                 " parameter was not set",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    """Locking for PowercycleNode.

    This is a last-resort option and shouldn't block on other
    jobs. Therefore, we grab no locks.

    """
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Reboots a node.

    """
    result = self.rpc.call_node_powercycle(self.op.node_name,
                                           self.cfg.GetHypervisorType())
    result.Raise("Failed to schedule the reboot")
    return result.payload


class LUClusterQuery(NoHooksLU):
  """Query cluster configuration.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    os_hvp = {}

    # Filter just for enabled hypervisors
    for os_name, hv_dict in cluster.os_hvp.items():
      os_hvp[os_name] = {}
      for hv_name, hv_params in hv_dict.items():
        if hv_name in cluster.enabled_hypervisors:
          os_hvp[os_name][hv_name] = hv_params

    # Convert ip_family to ip_version
    primary_ip_version = constants.IP4_VERSION
    if cluster.primary_ip_family == netutils.IP6Address.family:
      primary_ip_version = constants.IP6_VERSION

    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": max(constants.OS_API_VERSIONS),
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.enabled_hypervisors[0],
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
                        for hypervisor_name in cluster.enabled_hypervisors]),
      "os_hvp": os_hvp,
      "beparams": cluster.beparams,
      "osparams": cluster.osparams,
      "nicparams": cluster.nicparams,
      "ndparams": cluster.ndparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "master_netdev": cluster.master_netdev,
      "volume_group_name": cluster.volume_group_name,
      "drbd_usermode_helper": cluster.drbd_usermode_helper,
      "file_storage_dir": cluster.file_storage_dir,
      "shared_file_storage_dir": cluster.shared_file_storage_dir,
      "maintain_node_health": cluster.maintain_node_health,
      "ctime": cluster.ctime,
      "mtime": cluster.mtime,
      "uuid": cluster.uuid,
      "tags": list(cluster.GetTags()),
      "uid_pool": cluster.uid_pool,
      "default_iallocator": cluster.default_iallocator,
      "reserved_lvs": cluster.reserved_lvs,
      "primary_ip_version": primary_ip_version,
      "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
      "hidden_os": cluster.hidden_os,
      "blacklisted_os": cluster.blacklisted_os,
      }

    return result


class LUClusterConfigQuery(NoHooksLU):
  """Return configuration values.

  """
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
                                  "watcher_pause", "volume_group_name")

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      elif field == "watcher_pause":
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
      elif field == "volume_group_name":
        entry = self.cfg.GetVGName()
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values


class LUInstanceActivateDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = \
              _AssembleInstanceDisks(self, self.instance,
                                     ignore_size=self.op.ignore_size)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
                           ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type disks: list of L{objects.Disk} or None
  @param disks: which disks to assemble (or all, if None)
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  disks = _ExpandCheckDisks(instance, disks)

  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for idx, inst_disk in enumerate(disks):
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False, idx)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for idx, inst_disk in enumerate(disks):
    dev_path = None

    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True, idx)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
      else:
        dev_path = result.payload

    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info

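# Editor's illustrative sketch (not part of the original source): callers of
# _AssembleInstanceDisks check the boolean before using the device mapping,
# as LUInstanceActivateDisks above does:
#
#   disks_ok, dev_info = _AssembleInstanceDisks(self, self.instance)
#   if not disks_ok:
#     raise errors.OpExecError("Cannot activate block devices")
#   # dev_info holds (primary_node, iv_name, device_path) tuples
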
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                           ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUInstanceDeactivateDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    instance = self.instance
    if self.op.force:
      _ShutdownInstanceDisks(self, instance)
    else:
      _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance, disks=None):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
  _ShutdownInstanceDisks(lu, instance, disks=disks)


def _ExpandCheckDisks(instance, disks):
  """Return the instance disks selected by the disks list

  @type disks: list of L{objects.Disk} or None
  @param disks: selected disks
  @rtype: list of L{objects.Disk}
  @return: selected instance disks to act on

  """
  if disks is None:
    return instance.disks
  else:
    if not set(disks).issubset(instance.disks):
      raise errors.ProgrammerError("Can only act on disks belonging to the"
                                   " target instance")
    return disks


def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is false, errors on the primary node are
  not ignored.

  """
  all_result = True
  disks = _ExpandCheckDisks(instance, disks)

  for disk in disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if ((node == instance.primary_node and not ignore_primary) or
            (node != instance.primary_node and not result.offline)):
          all_result = False
  return all_result


def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], None, hypervisor_name)
  nodeinfo[node].Raise("Can't get data from node %s" % node,
                       prereq=True, ecode=errors.ECODE_ENVIRON)
  free_mem = nodeinfo[node].payload.get('memory_free', None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem),
                               errors.ECODE_ENVIRON)
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem),
                               errors.ECODE_NORES)

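# Editor's illustrative sketch (not part of the original source): a typical
# call, mirroring LUInstanceStartup.CheckPrereq further below, checks the
# primary node before starting an instance:
#
#   _CheckNodeFreeMemory(self, instance.primary_node,
#                        "starting instance %s" % instance.name,
#                        bep[constants.BE_MEMORY], instance.hypervisor)
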
def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
  """Checks if nodes have enough free disk space in all VGs.

  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type req_sizes: C{dict}
  @param req_sizes: the hash of vg and corresponding amount of disk in
      MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
      or we cannot check the node

  """
  for vg, req_size in req_sizes.items():
    _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)

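# Editor's illustrative sketch (hypothetical node and VG names, not part of
# the original source): req_sizes maps each volume group to the space needed
# on it in MiB, and every named node must satisfy all of them:
#
#   _CheckNodesFreeDiskPerVG(self, ["node1.example.com"],
#                            {"xenvg": 10240})
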
def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
  """Checks if nodes have enough free disk space in the specified VG.

  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type vg: C{str}
  @param vg: the volume group to check
  @type requested: C{int}
  @param requested: the amount of disk in MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(nodenames, vg, None)
  for node in nodenames:
    info = nodeinfo[node]
    info.Raise("Cannot get current information from node %s" % node,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    vg_free = info.payload.get("vg_free", None)
    if not isinstance(vg_free, int):
      raise errors.OpPrereqError("Can't compute free disk space on node"
                                 " %s for vg %s, result was '%s'" %
                                 (node, vg, vg_free), errors.ECODE_ENVIRON)
    if requested > vg_free:
      raise errors.OpPrereqError("Not enough disk space on target node %s"
                                 " vg %s: required %d MiB, available %d MiB" %
                                 (node, vg, requested, vg_free),
                                 errors.ECODE_NORES)


class LUInstanceStartup(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    # extra beparams
    if self.op.beparams:
      # fill the beparams dict
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }

    env.update(_BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # extra hvparams
    if self.op.hvparams:
      # check hypervisor parameter syntax (locally)
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
      filled_hvp = cluster.FillHV(instance)
      filled_hvp.update(self.op.hvparams)
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)

    self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline

    if self.primary_offline and self.op.ignore_offline_nodes:
      self.proc.LogWarning("Ignoring offline primary node")

      if self.op.hvparams or self.op.beparams:
        self.proc.LogWarning("Overridden parameters are ignored")
    else:
      _CheckNodeOnline(self, instance.primary_node)

      bep = self.cfg.GetClusterInfo().FillBE(instance)

      # check bridges existence
      _CheckInstanceBridgesExist(self, instance)

      remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                instance.name,
                                                instance.hypervisor)
      remote_info.Raise("Error checking node %s" % instance.primary_node,
                        prereq=True, ecode=errors.ECODE_ENVIRON)
      if not remote_info.payload: # not running already
        _CheckNodeFreeMemory(self, instance.primary_node,
                             "starting instance %s" % instance.name,
                             bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force

    self.cfg.MarkInstanceUp(instance.name)

    if self.primary_offline:
      assert self.op.ignore_offline_nodes
      self.proc.LogInfo("Primary node offline, marked instance as started")
    else:
      node_current = instance.primary_node

      _StartInstanceDisks(self, instance, force)

      result = self.rpc.call_instance_start(node_current, instance,
                                            self.op.hvparams, self.op.beparams)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance: %s" % msg)

  """Reboot an instance.
5481

5482
  """
5483
  HPATH = "instance-reboot"
5484
  HTYPE = constants.HTYPE_INSTANCE
5485
  REQ_BGL = False
5486

    
5487
  def ExpandNames(self):
5488
    self._ExpandAndLockInstance()
5489

    
5490
  def BuildHooksEnv(self):
5491
    """Build hooks env.
5492

5493
    This runs on master, primary and secondary nodes of the instance.
5494

5495
    """
5496
    env = {
5497
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
5498
      "REBOOT_TYPE": self.op.reboot_type,
5499
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5500
      }
5501

    
5502
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5503

    
5504
    return env
5505

    
5506
  def BuildHooksNodes(self):
5507
    """Build hooks nodes.
5508

5509
    """
5510
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5511
    return (nl, nl)
5512

    
5513
  def CheckPrereq(self):
5514
    """Check prerequisites.
5515

5516
    This checks that the instance is in the cluster.
5517

5518
    """
5519
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5520
    assert self.instance is not None, \
5521
      "Cannot retrieve locked instance %s" % self.op.instance_name
5522

    
5523
    _CheckNodeOnline(self, instance.primary_node)
5524

    
5525
    # check bridges existence
5526
    _CheckInstanceBridgesExist(self, instance)
5527

    
5528
  def Exec(self, feedback_fn):
5529
    """Reboot the instance.
5530

5531
    """
5532
    instance = self.instance
5533
    ignore_secondaries = self.op.ignore_secondaries
5534
    reboot_type = self.op.reboot_type
5535

    
5536
    remote_info = self.rpc.call_instance_info(instance.primary_node,
5537
                                              instance.name,
5538
                                              instance.hypervisor)
5539
    remote_info.Raise("Error checking node %s" % instance.primary_node)
5540
    instance_running = bool(remote_info.payload)
5541

    
5542
    node_current = instance.primary_node
5543

    
5544
    if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
5545
                                            constants.INSTANCE_REBOOT_HARD]:
5546
      for disk in instance.disks:
5547
        self.cfg.SetDiskID(disk, node_current)
5548
      result = self.rpc.call_instance_reboot(node_current, instance,
5549
                                             reboot_type,
5550
                                             self.op.shutdown_timeout)
5551
      result.Raise("Could not reboot instance")
5552
    else:
5553
      if instance_running:
5554
        result = self.rpc.call_instance_shutdown(node_current, instance,
5555
                                                 self.op.shutdown_timeout)
5556
        result.Raise("Could not shutdown instance for full reboot")
5557
        _ShutdownInstanceDisks(self, instance)
5558
      else:
5559
        self.LogInfo("Instance %s was already stopped, starting now",
5560
                     instance.name)
5561
      _StartInstanceDisks(self, instance, ignore_secondaries)
5562
      result = self.rpc.call_instance_start(node_current, instance, None, None)
5563
      msg = result.fail_msg
5564
      if msg:
5565
        _ShutdownInstanceDisks(self, instance)
5566
        raise errors.OpExecError("Could not start instance for"
5567
                                 " full reboot: %s" % msg)
5568

    
5569
    self.cfg.MarkInstanceUp(instance.name)
5570

    
5571

    
5572
class LUInstanceShutdown(LogicalUnit):
5573
  """Shutdown an instance.
5574

5575
  """
5576
  HPATH = "instance-stop"
5577
  HTYPE = constants.HTYPE_INSTANCE
5578
  REQ_BGL = False
5579

    
5580
  def ExpandNames(self):
5581
    self._ExpandAndLockInstance()
5582

    
5583
  def BuildHooksEnv(self):
5584
    """Build hooks env.
5585

5586
    This runs on master, primary and secondary nodes of the instance.
5587

5588
    """
5589
    env = _BuildInstanceHookEnvByObject(self, self.instance)
5590
    env["TIMEOUT"] = self.op.timeout
5591
    return env
5592

    
5593
  def BuildHooksNodes(self):
5594
    """Build hooks nodes.
5595

5596
    """
5597
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5598
    return (nl, nl)
5599

    
5600
  def CheckPrereq(self):
5601
    """Check prerequisites.
5602

5603
    This checks that the instance is in the cluster.
5604

5605
    """
5606
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5607
    assert self.instance is not None, \
5608
      "Cannot retrieve locked instance %s" % self.op.instance_name
5609

    
5610
    self.primary_offline = \
5611
      self.cfg.GetNodeInfo(self.instance.primary_node).offline
5612

    
5613
    if self.primary_offline and self.op.ignore_offline_nodes:
5614
      self.proc.LogWarning("Ignoring offline primary node")
5615
    else:
5616
      _CheckNodeOnline(self, self.instance.primary_node)
5617

    
5618
  def Exec(self, feedback_fn):
5619
    """Shutdown the instance.
5620

5621
    """
5622
    instance = self.instance
5623
    node_current = instance.primary_node
5624
    timeout = self.op.timeout
5625

    
5626
    self.cfg.MarkInstanceDown(instance.name)
5627

    
5628
    if self.primary_offline:
5629
      assert self.op.ignore_offline_nodes
5630
      self.proc.LogInfo("Primary node offline, marked instance as stopped")
5631
    else:
5632
      result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
5633
      msg = result.fail_msg
5634
      if msg:
5635
        self.proc.LogWarning("Could not shutdown instance: %s" % msg)
5636

    
5637
      _ShutdownInstanceDisks(self, instance)
5638

    
5639

    
5640
class LUInstanceReinstall(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    return _BuildInstanceHookEnvByObject(self, self.instance)

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
                     " offline, cannot reinstall")
    for node in instance.secondary_nodes:
      _CheckNodeOnline(self, node, "Instance secondary node offline,"
                       " cannot reinstall")

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name,
                                 errors.ECODE_INVAL)
    _CheckInstanceDown(self, instance, "cannot reinstall")

    if self.op.os_type is not None:
      # OS verification
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
      instance_os = self.op.os_type
    else:
      instance_os = instance.os

    nodelist = list(instance.all_nodes)

    if self.op.osparams:
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = None

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      # Write to configuration
      self.cfg.Update(inst, feedback_fn)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      # FIXME: pass debug option from opcode to backend
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
                                             self.op.debug_level,
                                             osparams=self.os_inst)
      result.Raise("Could not install OS for instance %s on node %s" %
                   (inst.name, inst.primary_node))
    finally:
      _ShutdownInstanceDisks(self, inst)


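# Illustrative sketch (not part of the original module): reinstalling an
# instance with a different OS and OS parameters. Field names mirror the
# self.op attributes used by LUInstanceReinstall above (instance_name,
# os_type, osparams, force_variant); the opcode class name and the example
# OS name are assumptions, see opcodes.py for the real definitions.
#
#   op = opcodes.OpInstanceReinstall(instance_name="inst1.example.com",
#                                    os_type="debootstrap+default",
#                                    osparams={})
#   # The instance must be stopped and all of its nodes online, otherwise
#   # CheckPrereq above raises OpPrereqError.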
class LUInstanceRecreateDisks(LogicalUnit):
  """Recreate an instance's missing disks.

  """
  HPATH = "instance-recreate-disks"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    # normalise the disk list
    self.op.disks = sorted(frozenset(self.op.disks))

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    if self.op.nodes:
      self.op.nodes = [_ExpandNodeName(self.cfg, n) for n in self.op.nodes]
      self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = []

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      # if we replace the nodes, we only need to lock the old primary,
      # otherwise we need to lock all nodes for disk re-creation
      primary_only = bool(self.op.nodes)
      self._LockInstancesNodes(primary_only=primary_only)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    return _BuildInstanceHookEnvByObject(self, self.instance)

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    if self.op.nodes:
      if len(self.op.nodes) != len(instance.all_nodes):
        raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
                                   " %d replacement nodes were specified" %
                                   (instance.name, len(instance.all_nodes),
                                    len(self.op.nodes)),
                                   errors.ECODE_INVAL)
      assert instance.disk_template != constants.DT_DRBD8 or \
          len(self.op.nodes) == 2
      assert instance.disk_template != constants.DT_PLAIN or \
          len(self.op.nodes) == 1
      primary_node = self.op.nodes[0]
    else:
      primary_node = instance.primary_node
    _CheckNodeOnline(self, primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name, errors.ECODE_INVAL)
    # if we replace nodes *and* the old primary is offline, we don't
    # check
    assert instance.primary_node in self.needed_locks[locking.LEVEL_NODE]
    old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
    if not (self.op.nodes and old_pnode.offline):
      _CheckInstanceDown(self, instance, "cannot recreate disks")

    if not self.op.disks:
      self.op.disks = range(len(instance.disks))
    else:
      for idx in self.op.disks:
        if idx >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index '%s'" % idx,
                                     errors.ECODE_INVAL)
    if self.op.disks != range(len(instance.disks)) and self.op.nodes:
      raise errors.OpPrereqError("Can't recreate disks partially and"
                                 " change the nodes at the same time",
                                 errors.ECODE_INVAL)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Recreate the disks.

    """
    # change primary node, if needed
    if self.op.nodes:
      self.instance.primary_node = self.op.nodes[0]
      self.LogWarning("Changing the instance's nodes, you will have to"
                      " remove any disks left on the older nodes manually")

    to_skip = []
    for idx, disk in enumerate(self.instance.disks):
      if idx not in self.op.disks: # disk idx has not been passed in
        to_skip.append(idx)
        continue
      # update secondaries for disks, if needed
      if self.op.nodes:
        if disk.dev_type == constants.LD_DRBD8:
          # need to update the nodes
          assert len(self.op.nodes) == 2
          logical_id = list(disk.logical_id)
          logical_id[0] = self.op.nodes[0]
          logical_id[1] = self.op.nodes[1]
          disk.logical_id = tuple(logical_id)

    if self.op.nodes:
      self.cfg.Update(self.instance, feedback_fn)

    _CreateDisks(self, self.instance, to_skip=to_skip)


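# Illustrative sketch (not part of the original module): recreating disks via
# the opcode handled by LUInstanceRecreateDisks above. Field names mirror the
# self.op attributes used by the LU (instance_name, disks, nodes); the opcode
# class name and node names are assumptions.
#
#   # recreate only disks 0 and 2 in place
#   op = opcodes.OpInstanceRecreateDisks(instance_name="inst1.example.com",
#                                        disks=[0, 2])
#   # recreate *all* disks on replacement nodes; per CheckPrereq above a
#   # partial disk list cannot be combined with a node change
#   op = opcodes.OpInstanceRecreateDisks(instance_name="inst1.example.com",
#                                        disks=[],
#                                        nodes=["node3.example.com",
#                                               "node4.example.com"])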
class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    _CheckNodeOnline(self, instance.primary_node)
    _CheckInstanceDown(self, instance, "cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = netutils.GetHostname(name=new_name)
      if hostname != new_name:
        self.LogInfo("Resolved given name '%s' to '%s'", new_name,
                     hostname.name)
      if not utils.MatchNameComponent(self.op.new_name, [hostname.name]):
        raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                    " same as given hostname '%s'") %
                                    (hostname.name, self.op.new_name),
                                    errors.ECODE_INVAL)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    rename_file_storage = False
    if (inst.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE) and
        self.op.new_name != inst.name):
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)

    return inst.name


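# Illustrative sketch (not part of the original module): renaming an instance.
# Field names mirror the self.op attributes used by LUInstanceRename above
# (instance_name, new_name, name_check, ip_check); the opcode class name is
# an assumption, see opcodes.py.
#
#   op = opcodes.OpInstanceRename(instance_name="old.example.com",
#                                 new_name="new.example.com",
#                                 name_check=True, ip_check=True)
#   # ip_check=True without name_check=True is rejected by CheckArguments,
#   # and the instance must be stopped (see CheckPrereq above).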
class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)


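# Illustrative sketch (not part of the original module): removing an instance
# while tolerating a failed shutdown. Field names mirror the self.op
# attributes used by LUInstanceRemove above (instance_name, shutdown_timeout,
# ignore_failures); the opcode class name is an assumption.
#
#   op = opcodes.OpInstanceRemove(instance_name="inst1.example.com",
#                                 shutdown_timeout=60,
#                                 ignore_failures=True)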
def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
  """Utility function to remove an instance.

  """
  logging.info("Removing block devices for instance %s", instance.name)

  if not _RemoveDisks(lu, instance):
    if not ignore_failures:
      raise errors.OpExecError("Can't remove instance's disks")
    feedback_fn("Warning: can't remove instance's disks")

  logging.info("Removing instance %s out of cluster config", instance.name)

  lu.cfg.RemoveInstance(instance.name)

  assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
    "Instance lock removal conflict"

  # Remove lock for the instance
  lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name


class LUInstanceQuery(NoHooksLU):
  """Logical unit for querying instances.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
                             self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.iq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.iq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.iq.OldStyleQuery(self)


class LUInstanceFailover(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.iallocator = getattr(self.op, "iallocator", None)
    self.target_node = getattr(self.op, "target_node", None)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if self.op.target_node is not None:
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    ignore_consistency = self.op.ignore_consistency
    shutdown_timeout = self.op.shutdown_timeout
    self._migrater = TLMigrateInstance(self, self.op.instance_name,
                                       cleanup=False,
                                       failover=True,
                                       ignore_consistency=ignore_consistency,
                                       shutdown_timeout=shutdown_timeout)
    self.tasklets = [self._migrater]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
      if instance.disk_template in constants.DTS_EXT_MIRROR:
        if self.op.target_node is None:
          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        else:
          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
                                                   self.op.target_node]
        del self.recalculate_locks[locking.LEVEL_NODE]
      else:
        self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self._migrater.instance
    source_node = instance.primary_node
    target_node = self.op.target_node
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      "OLD_PRIMARY": source_node,
      "NEW_PRIMARY": target_node,
      }

    if instance.disk_template in constants.DTS_INT_MIRROR:
      env["OLD_SECONDARY"] = instance.secondary_nodes[0]
      env["NEW_SECONDARY"] = source_node
    else:
      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""

    env.update(_BuildInstanceHookEnvByObject(self, instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    instance = self._migrater.instance
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    return (nl, nl + [instance.primary_node])


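# Illustrative sketch (not part of the original module): failing over an
# instance. Field names mirror the self.op attributes used by
# LUInstanceFailover above (instance_name, ignore_consistency,
# shutdown_timeout, target_node, iallocator); the opcode class name is an
# assumption.
#
#   op = opcodes.OpInstanceFailover(instance_name="inst1.example.com",
#                                   ignore_consistency=False,
#                                   shutdown_timeout=120)
#   # For internally mirrored (DRBD) disks the target is always the current
#   # secondary; a different target_node or an iallocator only makes sense
#   # for DTS_EXT_MIRROR templates (see TLMigrateInstance.CheckPrereq below).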
class LUInstanceMigrate(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if self.op.target_node is not None:
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self._migrater = TLMigrateInstance(self, self.op.instance_name,
                                       cleanup=self.op.cleanup,
                                       failover=False,
                                       fallback=self.op.allow_failover)
    self.tasklets = [self._migrater]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
      if instance.disk_template in constants.DTS_EXT_MIRROR:
        if self.op.target_node is None:
          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        else:
          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
                                                   self.op.target_node]
        del self.recalculate_locks[locking.LEVEL_NODE]
      else:
        self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self._migrater.instance
    source_node = instance.primary_node
    target_node = self.op.target_node
    env = _BuildInstanceHookEnvByObject(self, instance)
    env.update({
      "MIGRATE_LIVE": self._migrater.live,
      "MIGRATE_CLEANUP": self.op.cleanup,
      "OLD_PRIMARY": source_node,
      "NEW_PRIMARY": target_node,
      })

    if instance.disk_template in constants.DTS_INT_MIRROR:
      env["OLD_SECONDARY"] = target_node
      env["NEW_SECONDARY"] = source_node
    else:
      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    instance = self._migrater.instance
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    return (nl, nl + [instance.primary_node])


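# Illustrative sketch (not part of the original module): migrating an
# instance. Field names mirror the self.op attributes used by
# LUInstanceMigrate above (instance_name, live, mode, cleanup,
# allow_failover); the opcode class name is an assumption.
#
#   op = opcodes.OpInstanceMigrate(instance_name="inst1.example.com",
#                                  live=True, allow_failover=False)
#   # 'live' and 'mode' are mutually exclusive: live=True is translated to
#   # mode=constants.HT_MIGRATION_LIVE by TLMigrateInstance.CheckPrereq, and
#   # with both unset the hypervisor's HV_MIGRATION_MODE default is used.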
class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    _CheckNodeVmCapable(self, target_node)

    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      _CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, disk,
                                               instance.name, True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node, disk,
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    instance.primary_node = target_node
    self.cfg.Update(instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    _RemoveDisks(self, instance, target_node=source_node)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


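# Illustrative summary (not part of the original module) of the data-copying
# move implemented by LUInstanceMove.Exec above; only plain LVM- and
# file-based disks qualify (see CheckPrereq). Opcode field names mirror the
# LU's self.op attributes and are assumptions.
#
#   1. shut the instance down on the source node (warn only if
#      ignore_consistency is set)
#   2. create empty disks on the target node (_CreateDisks)
#   3. assemble each target disk and stream the data over with
#      call_blockdev_export
#   4. point instance.primary_node at the target and update the config
#   5. remove the old disks and, if the instance was marked up, restart it
#
#   op = opcodes.OpInstanceMove(instance_name="inst1.example.com",
#                               target_node="node2.example.com")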
class LUNodeMigrate(LogicalUnit):
  """Migrate all instances from a node.

  """
  HPATH = "node-migrate"
  HTYPE = constants.HTYPE_NODE
  REQ_BGL = False

  def CheckArguments(self):
    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")

  def ExpandNames(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    self.needed_locks = {}

    # Create tasklets for migrating instances for all instances on this node
    names = []
    tasklets = []

    self.lock_all_nodes = False

    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
      logging.debug("Migrating instance %s", inst.name)
      names.append(inst.name)

      tasklets.append(TLMigrateInstance(self, inst.name, cleanup=False))

      if inst.disk_template in constants.DTS_EXT_MIRROR:
        # We need to lock all nodes, as the iallocator will choose the
        # destination nodes afterwards
        self.lock_all_nodes = True

    self.tasklets = tasklets

    # Declare node locks
    if self.lock_all_nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = [self.op.node_name]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

    # Declare instance locks
    self.needed_locks[locking.LEVEL_INSTANCE] = names

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and not self.lock_all_nodes:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    return {
      "NODE_NAME": self.op.node_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    return (nl, nl)


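# Illustrative sketch (not part of the original module): evacuating the
# primary instances of a node. LUNodeMigrate above simply builds one
# TLMigrateInstance tasklet per primary instance of the node. The opcode
# class and field names mirror the LU's self.op attributes and are
# assumptions.
#
#   op = opcodes.OpNodeMigrate(node_name="node1.example.com")
#   # With externally mirrored instances present, all node locks are taken,
#   # since an iallocator may pick arbitrary destination nodes.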
class TLMigrateInstance(Tasklet):
  """Tasklet class for instance migration.

  @type live: boolean
  @ivar live: whether the migration will be done live or non-live;
      this variable is initialized only after CheckPrereq has run
  @type cleanup: boolean
  @ivar cleanup: Whether we clean up from a failed migration
  @type iallocator: string
  @ivar iallocator: The iallocator used to determine target_node
  @type target_node: string
  @ivar target_node: If given, the target_node to reallocate the instance to
  @type failover: boolean
  @ivar failover: Whether operation results in failover or migration
  @type fallback: boolean
  @ivar fallback: Whether fallback to failover is allowed if migration not
                  possible
  @type ignore_consistency: boolean
  @ivar ignore_consistency: Whether we should ignore consistency between source
                            and target node
  @type shutdown_timeout: int
  @ivar shutdown_timeout: In case of failover, the timeout for the shutdown

  """
  def __init__(self, lu, instance_name, cleanup=False,
               failover=False, fallback=False,
               ignore_consistency=False,
               shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT):
    """Initializes this class.

    """
    Tasklet.__init__(self, lu)

    # Parameters
    self.instance_name = instance_name
    self.cleanup = cleanup
    self.live = False # will be overridden later
    self.failover = failover
    self.fallback = fallback
    self.ignore_consistency = ignore_consistency
    self.shutdown_timeout = shutdown_timeout

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
    instance = self.cfg.GetInstanceInfo(instance_name)
    assert instance is not None
    self.instance = instance

    if (not self.cleanup and not instance.admin_up and not self.failover and
        self.fallback):
      self.lu.LogInfo("Instance is marked down, fallback allowed, switching"
                      " to failover")
      self.failover = True

    if instance.disk_template not in constants.DTS_MIRRORED:
      if self.failover:
        text = "failovers"
      else:
        text = "migrations"
      raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
                                 " %s" % (instance.disk_template, text),
                                 errors.ECODE_STATE)

    if instance.disk_template in constants.DTS_EXT_MIRROR:
      _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")

      if self.lu.op.iallocator:
        self._RunAllocator()
      else:
        # We set self.target_node as it is required by
        # BuildHooksEnv
        self.target_node = self.lu.op.target_node

      # self.target_node is already populated, either directly or by the
      # iallocator run
      target_node = self.target_node
      if self.target_node == instance.primary_node:
        raise errors.OpPrereqError("Cannot migrate instance %s"
                                   " to its primary (%s)" %
                                   (instance.name, instance.primary_node))

      if len(self.lu.tasklets) == 1:
        # It is safe to release locks only when we're the only tasklet
        # in the LU
        _ReleaseLocks(self.lu, locking.LEVEL_NODE,
                      keep=[instance.primary_node, self.target_node])

    else:
      secondary_nodes = instance.secondary_nodes
      if not secondary_nodes:
        raise errors.ConfigurationError("No secondary node but using"
                                        " %s disk template" %
                                        instance.disk_template)
      target_node = secondary_nodes[0]
      if self.lu.op.iallocator or (self.lu.op.target_node and
                                   self.lu.op.target_node != target_node):
        if self.failover:
          text = "failed over"
        else:
          text = "migrated"
        raise errors.OpPrereqError("Instances with disk template %s cannot"
                                   " be %s to arbitrary nodes"
                                   " (neither an iallocator nor a target"
                                   " node can be passed)" %
                                   (instance.disk_template, text),
                                   errors.ECODE_INVAL)

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    # check memory requirements on the secondary node
    if not self.failover or instance.admin_up:
      _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
                           instance.name, i_be[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.lu.LogInfo("Not checking memory on the secondary node as"
                      " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self.lu, instance, node=target_node)

    if not self.cleanup:
      _CheckNodeNotDrained(self.lu, target_node)
      if not self.failover:
        result = self.rpc.call_instance_migratable(instance.primary_node,
                                                   instance)
        if result.fail_msg and self.fallback:
          self.lu.LogInfo("Can't migrate, instance offline, fallback to"
                          " failover")
          self.failover = True
        else:
          result.Raise("Can't migrate, please use failover",
                       prereq=True, ecode=errors.ECODE_STATE)

    assert not (self.failover and self.cleanup)

    if not self.failover:
      if self.lu.op.live is not None and self.lu.op.mode is not None:
        raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
                                   " parameters are accepted",
                                   errors.ECODE_INVAL)
      if self.lu.op.live is not None:
        if self.lu.op.live:
          self.lu.op.mode = constants.HT_MIGRATION_LIVE
        else:
          self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
        # reset the 'live' parameter to None so that repeated
        # invocations of CheckPrereq do not raise an exception
        self.lu.op.live = None
      elif self.lu.op.mode is None:
        # read the default value from the hypervisor
        i_hv = self.cfg.GetClusterInfo().FillHV(self.instance,
                                                skip_globals=False)
        self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]

      self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
    else:
      # Failover is never live
      self.live = False

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    ial = IAllocator(self.cfg, self.rpc,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.instance_name,
                     # TODO See why hail breaks with a single node below
                     relocate_from=[self.instance.primary_node,
                                    self.instance.primary_node],
                     )

    ial.Run(self.lu.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.lu.op.iallocator, ial.info),
                                 errors.ECODE_NORES)
    if len(ial.result) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.lu.op.iallocator, len(ial.result),
                                  ial.required_nodes), errors.ECODE_FAULT)
    self.target_node = ial.result[0]
    self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.instance_name, self.lu.op.iallocator,
                 utils.CommaJoin(ial.result))

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        nres.Raise("Cannot resync disks on node %s" % node)
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    result.Raise("Cannot change disk to secondary on node %s" % node)

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      nres.Raise("Cannot disconnect disks node %s" % node)

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      nres.Raise("Cannot change disks config on node %s" % node)

  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise("Can't contact node %s" % node)

    runningon_source = instance.name in ins_l[source_node].payload
    runningon_target = instance.name in ins_l[target_node].payload

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused; you will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all;"
                               " in this case it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance, self.feedback_fn)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    if instance.disk_template in constants.DTS_INT_MIRROR:
      self._EnsureSecondary(demoted_node)
      try:
        self._WaitUntilSync()
      except errors.OpExecError:
        # we ignore errors here, since if the device is standalone, it
        # won't be able to sync
        pass
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    """
    target_node = self.target_node
    if self.instance.disk_template in constants.DTS_EXT_MIRROR:
      return

    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.lu.LogWarning("Migration failed and I can't reconnect the drives,"
                         " please try to recover the instance manually;"
                         " error '%s'" % str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.fail_msg
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s",
                    target_node, abort_msg)
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self.lu, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migration" % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.fail_msg
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
      # Then switch the disks to master/master mode
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(True)
      self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.fail_msg
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Pre-migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.live)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance, self.feedback_fn)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s", msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
      self._EnsureSecondary(source_node)
      self._WaitUntilSync()
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()

    self.feedback_fn("* done")

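  # Illustrative summary (not part of the original module) of the RPC
  # conversation performed by _ExecMigration above for a DRBD-backed
  # instance:
  #
  #   call_migration_info(source)                 -> migration_info payload
  #   _EnsureSecondary/_GoStandalone/_GoReconnect(True)   # dual-master DRBD
  #   call_accept_instance(target, migration_info, target_ip)
  #   call_instance_migrate(source, target_ip, live)
  #   call_finalize_migration(target, migration_info, True)
  #   _EnsureSecondary(old primary) ... _GoReconnect(False)  # single-master
  #
  # A failure in the accept or migrate step triggers _AbortMigration plus
  # _RevertDiskStatus before the error is re-raised.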
  def _ExecFailover(self):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance
    primary_node = self.cfg.GetNodeInfo(instance.primary_node)

    source_node = instance.primary_node
    target_node = self.target_node

    if instance.admin_up:
      self.feedback_fn("* checking disk consistency between source and target")
      for dev in instance.disks:
        # for drbd, these are drbd over lvm
        if not _CheckDiskConsistency(self, dev, target_node, False):
          if not self.ignore_consistency:
            raise errors.OpExecError("Disk %s is degraded on target node,"
                                     " aborting failover" % dev.iv_name)
    else:
      self.feedback_fn("* not checking disk consistency as instance is not"
                       " running")

    self.feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.ignore_consistency or primary_node.offline:
        self.lu.LogWarning("Could not shutdown instance %s on node %s,"
                           " proceeding anyway; please make sure node"
                           " %s is down; error details: %s",
                           instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    self.feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance, self.feedback_fn)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      self.feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      self.feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))

  def Exec(self, feedback_fn):
7014
    """Perform the migration.
7015

7016
    """
7017
    self.feedback_fn = feedback_fn
7018
    self.source_node = self.instance.primary_node
7019

    
7020
    # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
7021
    if self.instance.disk_template in constants.DTS_INT_MIRROR:
7022
      self.target_node = self.instance.secondary_nodes[0]
7023
      # Otherwise self.target_node has been populated either
7024
      # directly, or through an iallocator.
7025

    
7026
    self.all_nodes = [self.source_node, self.target_node]
7027
    self.nodes_ip = {
7028
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
7029
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
7030
      }
7031

    
7032
    if self.failover:
7033
      feedback_fn("Failover instance %s" % self.instance.name)
7034
      self._ExecFailover()
7035
    else:
7036
      feedback_fn("Migrating instance %s" % self.instance.name)
7037

    
7038
      if self.cleanup:
7039
        return self._ExecCleanup()
7040
      else:
7041
        return self._ExecMigration()
7042

    
7043

    
7044
def _CreateBlockDev(lu, node, instance, device, force_create,
7045
                    info, force_open):
7046
  """Create a tree of block devices on a given node.
7047

7048
  If this device type has to be created on secondaries, create it and
7049
  all its children.
7050

7051
  If not, just recurse to children keeping the same 'force' value.
7052

7053
  @param lu: the lu on whose behalf we execute
7054
  @param node: the node on which to create the device
7055
  @type instance: L{objects.Instance}
7056
  @param instance: the instance which owns the device
7057
  @type device: L{objects.Disk}
7058
  @param device: the device to create
7059
  @type force_create: boolean
7060
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device which has
      the CreateOnSecondary() attribute
7063
  @param info: the extra 'metadata' we should attach to the device
7064
      (this will be represented as a LVM tag)
7065
  @type force_open: boolean
7066
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution
7070

7071
  """
7072
  if device.CreateOnSecondary():
7073
    force_create = True
7074

    
7075
  if device.children:
7076
    for child in device.children:
7077
      _CreateBlockDev(lu, node, instance, child, force_create,
7078
                      info, force_open)
7079

    
7080
  if not force_create:
7081
    return
7082

    
7083
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
7084

    
7085

    
7086
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
7087
  """Create a single block device on a given node.
7088

7089
  This will not recurse over children of the device, so they must be
7090
  created in advance.
7091

7092
  @param lu: the lu on whose behalf we execute
7093
  @param node: the node on which to create the device
7094
  @type instance: L{objects.Instance}
7095
  @param instance: the instance which owns the device
7096
  @type device: L{objects.Disk}
7097
  @param device: the device to create
7098
  @param info: the extra 'metadata' we should attach to the device
7099
      (this will be represented as a LVM tag)
7100
  @type force_open: boolean
7101
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution
7105

7106
  """
7107
  lu.cfg.SetDiskID(device, node)
7108
  result = lu.rpc.call_blockdev_create(node, device, device.size,
7109
                                       instance.name, force_open, info)
7110
  result.Raise("Can't create block device %s on"
7111
               " node %s for instance %s" % (device, node, instance.name))
7112
  if device.physical_id is None:
7113
    device.physical_id = result.payload
7114

    
7115

    
7116
def _GenerateUniqueNames(lu, exts):
7117
  """Generate a suitable LV name.
7118

7119
  This will generate a logical volume name for the given instance.
7120

7121
  """
7122
  results = []
7123
  for val in exts:
7124
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
7125
    results.append("%s%s" % (new_id, val))
7126
  return results
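  # Illustrative example (the UUID below is made up): a single ext ".disk0"
  # would yield a name like "4337aeb9-48f1-44b4-a6a3-9f157b0a9fc2.disk0".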


def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
7130
                         iv_name, p_minor, s_minor):
7131
  """Generate a drbd8 device complete with its children.
7132

7133
  """
7134
  assert len(vgnames) == len(names) == 2
7135
  port = lu.cfg.AllocatePort()
7136
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
7137
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
7138
                          logical_id=(vgnames[0], names[0]))
7139
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
7140
                          logical_id=(vgnames[1], names[1]))
7141
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
7142
                          logical_id=(primary, secondary, port,
7143
                                      p_minor, s_minor,
7144
                                      shared_secret),
7145
                          children=[dev_data, dev_meta],
7146
                          iv_name=iv_name)
7147
  return drbd_dev
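  # Schematically, the returned device tree looks like this (sizes in MiB,
  # the 128 MiB volume holds the DRBD metadata):
  #
  #   DRBD8 (iv_name, size=<size>)
  #   +- LV data (vgnames[0]/names[0], size=<size>)
  #   +- LV meta (vgnames[1]/names[1], size=128)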


def _GenerateDiskTemplate(lu, template_name,
7151
                          instance_name, primary_node,
7152
                          secondary_nodes, disk_info,
7153
                          file_storage_dir, file_driver,
7154
                          base_index, feedback_fn):
7155
  """Generate the entire disk layout for a given template type.
7156

7157
  """
7158
  #TODO: compute space requirements
7159

    
7160
  vgname = lu.cfg.GetVGName()
7161
  disk_count = len(disk_info)
7162
  disks = []
7163
  if template_name == constants.DT_DISKLESS:
7164
    pass
7165
  elif template_name == constants.DT_PLAIN:
7166
    if len(secondary_nodes) != 0:
7167
      raise errors.ProgrammerError("Wrong template configuration")
7168

    
7169
    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
7170
                                      for i in range(disk_count)])
7171
    for idx, disk in enumerate(disk_info):
7172
      disk_index = idx + base_index
7173
      vg = disk.get(constants.IDISK_VG, vgname)
7174
      feedback_fn("* disk %i, vg %s, name %s" % (idx, vg, names[idx]))
7175
      disk_dev = objects.Disk(dev_type=constants.LD_LV,
7176
                              size=disk[constants.IDISK_SIZE],
7177
                              logical_id=(vg, names[idx]),
7178
                              iv_name="disk/%d" % disk_index,
7179
                              mode=disk[constants.IDISK_MODE])
7180
      disks.append(disk_dev)
7181
  elif template_name == constants.DT_DRBD8:
7182
    if len(secondary_nodes) != 1:
7183
      raise errors.ProgrammerError("Wrong template configuration")
7184
    remote_node = secondary_nodes[0]
7185
    minors = lu.cfg.AllocateDRBDMinor(
7186
      [primary_node, remote_node] * len(disk_info), instance_name)
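    # AllocateDRBDMinor returns one (primary, secondary) minor pair per disk,
    # in order, so disk idx uses minors[2 * idx] and minors[2 * idx + 1] below.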
    names = []
7189
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
7190
                                               for i in range(disk_count)]):
7191
      names.append(lv_prefix + "_data")
7192
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
7194
      disk_index = idx + base_index
7195
      data_vg = disk.get(constants.IDISK_VG, vgname)
7196
      meta_vg = disk.get(constants.IDISK_METAVG, data_vg)
7197
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
7198
                                      disk[constants.IDISK_SIZE],
7199
                                      [data_vg, meta_vg],
7200
                                      names[idx * 2:idx * 2 + 2],
7201
                                      "disk/%d" % disk_index,
7202
                                      minors[idx * 2], minors[idx * 2 + 1])
7203
      disk_dev.mode = disk[constants.IDISK_MODE]
7204
      disks.append(disk_dev)
7205
  elif template_name == constants.DT_FILE:
7206
    if len(secondary_nodes) != 0:
7207
      raise errors.ProgrammerError("Wrong template configuration")
7208

    
7209
    opcodes.RequireFileStorage()
7210

    
7211
    for idx, disk in enumerate(disk_info):
7212
      disk_index = idx + base_index
7213
      disk_dev = objects.Disk(dev_type=constants.LD_FILE,
7214
                              size=disk[constants.IDISK_SIZE],
7215
                              iv_name="disk/%d" % disk_index,
7216
                              logical_id=(file_driver,
7217
                                          "%s/disk%d" % (file_storage_dir,
7218
                                                         disk_index)),
7219
                              mode=disk[constants.IDISK_MODE])
7220
      disks.append(disk_dev)
7221
  elif template_name == constants.DT_SHARED_FILE:
7222
    if len(secondary_nodes) != 0:
7223
      raise errors.ProgrammerError("Wrong template configuration")
7224

    
7225
    opcodes.RequireSharedFileStorage()
7226

    
7227
    for idx, disk in enumerate(disk_info):
7228
      disk_index = idx + base_index
7229
      disk_dev = objects.Disk(dev_type=constants.LD_FILE,
7230
                              size=disk[constants.IDISK_SIZE],
7231
                              iv_name="disk/%d" % disk_index,
7232
                              logical_id=(file_driver,
7233
                                          "%s/disk%d" % (file_storage_dir,
7234
                                                         disk_index)),
7235
                              mode=disk[constants.IDISK_MODE])
7236
      disks.append(disk_dev)
7237
  elif template_name == constants.DT_BLOCK:
7238
    if len(secondary_nodes) != 0:
7239
      raise errors.ProgrammerError("Wrong template configuration")
7240

    
7241
    for idx, disk in enumerate(disk_info):
7242
      disk_index = idx + base_index
7243
      disk_dev = objects.Disk(dev_type=constants.LD_BLOCKDEV,
7244
                              size=disk[constants.IDISK_SIZE],
7245
                              logical_id=(constants.BLOCKDEV_DRIVER_MANUAL,
7246
                                          disk[constants.IDISK_ADOPT]),
7247
                              iv_name="disk/%d" % disk_index,
7248
                              mode=disk[constants.IDISK_MODE])
7249
      disks.append(disk_dev)
7250

    
7251
  else:
7252
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
7253
  return disks
7254

    
7255

    
7256
def _GetInstanceInfoText(instance):
7257
  """Compute that text that should be added to the disk's metadata.
7258

7259
  """
7260
  return "originstname+%s" % instance.name


def _CalcEta(time_taken, written, total_size):
7264
  """Calculates the ETA based on size written and total size.
7265

7266
  @param time_taken: The time taken so far
7267
  @param written: amount written so far
7268
  @param total_size: The total size of data to be written
7269
  @return: The remaining time in seconds
7270

7271
  """
7272
  avg_time = time_taken / float(written)
7273
  return (total_size - written) * avg_time
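  # Illustrative example: if 256 MiB out of 1024 MiB were written in 64
  # seconds, the average is 0.25 s/MiB and the remaining 768 MiB give an
  # ETA of 192 seconds.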


def _WipeDisks(lu, instance):
7277
  """Wipes instance disks.
7278

7279
  @type lu: L{LogicalUnit}
7280
  @param lu: the logical unit on whose behalf we execute
7281
  @type instance: L{objects.Instance}
7282
  @param instance: the instance whose disks we should create
7283
  @return: the success of the wipe
7284

7285
  """
7286
  node = instance.primary_node
7287

    
7288
  for device in instance.disks:
7289
    lu.cfg.SetDiskID(device, node)
7290

    
7291
  logging.info("Pause sync of instance %s disks", instance.name)
7292
  result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)
7293

    
7294
  for idx, success in enumerate(result.payload):
7295
    if not success:
7296
      logging.warn("pause-sync of instance %s for disks %d failed",
7297
                   instance.name, idx)
7298

    
7299
  try:
7300
    for idx, device in enumerate(instance.disks):
7301
      # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
7302
      # MAX_WIPE_CHUNK at max
7303
      wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
7304
                            constants.MIN_WIPE_CHUNK_PERCENT)
7305
      # we _must_ make this an int, otherwise rounding errors will
7306
      # occur
7307
      wipe_chunk_size = int(wipe_chunk_size)
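      # Illustrative example (assuming MAX_WIPE_CHUNK is 1024 MiB and
      # MIN_WIPE_CHUNK_PERCENT is 10): a 2048 MiB disk is wiped in
      # min(1024, 204.8) -> 204 MiB chunks, while a 20 GiB disk uses full
      # 1024 MiB chunks.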
      lu.LogInfo("* Wiping disk %d", idx)
7310
      logging.info("Wiping disk %d for instance %s, node %s using"
7311
                   " chunk size %s", idx, instance.name, node, wipe_chunk_size)
7312

    
7313
      offset = 0
7314
      size = device.size
7315
      last_output = 0
7316
      start_time = time.time()
7317

    
7318
      while offset < size:
7319
        wipe_size = min(wipe_chunk_size, size - offset)
7320
        logging.debug("Wiping disk %d, offset %s, chunk %s",
7321
                      idx, offset, wipe_size)
7322
        result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
7323
        result.Raise("Could not wipe disk %d at offset %d for size %d" %
7324
                     (idx, offset, wipe_size))
7325
        now = time.time()
7326
        offset += wipe_size
7327
        if now - last_output >= 60:
7328
          eta = _CalcEta(now - start_time, offset, size)
7329
          lu.LogInfo(" - done: %.1f%% ETA: %s" %
7330
                     (offset / float(size) * 100, utils.FormatSeconds(eta)))
7331
          last_output = now
7332
  finally:
7333
    logging.info("Resume sync of instance %s disks", instance.name)
7334

    
7335
    result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, False)
7336

    
7337
    for idx, success in enumerate(result.payload):
7338
      if not success:
7339
        lu.LogWarning("Resume sync of disk %d failed, please have a"
7340
                      " look at the status and troubleshoot the issue", idx)
7341
        logging.warn("resume-sync of instance %s for disks %d failed",
7342
                     instance.name, idx)
7343

    
7344

    
7345
def _CreateDisks(lu, instance, to_skip=None, target_node=None):
7346
  """Create all disks for an instance.
7347

7348
  This abstracts away some work from AddInstance.
7349

7350
  @type lu: L{LogicalUnit}
7351
  @param lu: the logical unit on whose behalf we execute
7352
  @type instance: L{objects.Instance}
7353
  @param instance: the instance whose disks we should create
7354
  @type to_skip: list
7355
  @param to_skip: list of indices to skip
7356
  @type target_node: string
7357
  @param target_node: if passed, overrides the target node for creation
7358
  @rtype: boolean
7359
  @return: the success of the creation
7360

7361
  """
7362
  info = _GetInstanceInfoText(instance)
7363
  if target_node is None:
7364
    pnode = instance.primary_node
7365
    all_nodes = instance.all_nodes
7366
  else:
7367
    pnode = target_node
7368
    all_nodes = [pnode]
7369

    
7370
  if instance.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
7371
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
7372
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
7373

    
7374
    result.Raise("Failed to create directory '%s' on"
7375
                 " node %s" % (file_storage_dir, pnode))
7376

    
7377
  # Note: this needs to be kept in sync with adding of disks in
7378
  # LUInstanceSetParams
7379
  for idx, device in enumerate(instance.disks):
7380
    if to_skip and idx in to_skip:
7381
      continue
7382
    logging.info("Creating volume %s for instance %s",
7383
                 device.iv_name, instance.name)
7384
    #HARDCODE
7385
    for node in all_nodes:
7386
      f_create = node == pnode
7387
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
7388

    
7389

    
7390
def _RemoveDisks(lu, instance, target_node=None):
7391
  """Remove all disks for an instance.
7392

7393
  This abstracts away some work from `AddInstance()` and
7394
  `RemoveInstance()`. Note that in case some of the devices couldn't
7395
  be removed, the removal will continue with the other ones (compare
7396
  with `_CreateDisks()`).
7397

7398
  @type lu: L{LogicalUnit}
7399
  @param lu: the logical unit on whose behalf we execute
7400
  @type instance: L{objects.Instance}
7401
  @param instance: the instance whose disks we should remove
7402
  @type target_node: string
7403
  @param target_node: used to override the node on which to remove the disks
7404
  @rtype: boolean
7405
  @return: the success of the removal
7406

7407
  """
7408
  logging.info("Removing block devices for instance %s", instance.name)
7409

    
7410
  all_result = True
7411
  for device in instance.disks:
7412
    if target_node:
7413
      edata = [(target_node, device)]
7414
    else:
7415
      edata = device.ComputeNodeTree(instance.primary_node)
7416
    for node, disk in edata:
7417
      lu.cfg.SetDiskID(disk, node)
7418
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
7419
      if msg:
7420
        lu.LogWarning("Could not remove block device %s on node %s,"
7421
                      " continuing anyway: %s", device.iv_name, node, msg)
7422
        all_result = False
7423

    
7424
  if instance.disk_template == constants.DT_FILE:
7425
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
7426
    if target_node:
7427
      tgt = target_node
7428
    else:
7429
      tgt = instance.primary_node
7430
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
7431
    if result.fail_msg:
7432
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
7433
                    file_storage_dir, tgt, result.fail_msg)
7434
      all_result = False
7435

    
7436
  return all_result
7437

    
7438

    
7439
def _ComputeDiskSizePerVG(disk_template, disks):
7440
  """Compute disk size requirements in the volume group
7441

7442
  """
7443
  def _compute(disks, payload):
7444
    """Universal algorithm.
7445

7446
    """
7447
    vgs = {}
7448
    for disk in disks:
7449
      # look up by the disk's VG name (not the literal IDISK_VG key) so that
      # sizes of multiple disks in the same VG accumulate correctly
      vg_name = disk[constants.IDISK_VG]
      vgs[vg_name] = (vgs.get(vg_name, 0) +
                      disk[constants.IDISK_SIZE] + payload)
7451

    
7452
    return vgs
7453

    
7454
  # Required free disk space as a function of disk and swap space
7455
  req_size_dict = {
7456
    constants.DT_DISKLESS: {},
7457
    constants.DT_PLAIN: _compute(disks, 0),
7458
    # 128 MB are added for drbd metadata for each disk
7459
    constants.DT_DRBD8: _compute(disks, 128),
7460
    constants.DT_FILE: {},
7461
    constants.DT_SHARED_FILE: {},
7462
  }
7463

    
7464
  if disk_template not in req_size_dict:
7465
    raise errors.ProgrammerError("Disk template '%s' size requirement"
7466
                                 " is unknown" %  disk_template)
7467

    
7468
  return req_size_dict[disk_template]
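  # Illustrative example: a DRBD8 instance with two disks of 1024 MiB and
  # 2048 MiB, both in volume group "xenvg", yields
  # {"xenvg": (1024 + 128) + (2048 + 128)} == {"xenvg": 3328}.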


def _ComputeDiskSize(disk_template, disks):
7472
  """Compute disk size requirements in the volume group
7473

7474
  """
7475
  # Required free disk space as a function of disk and swap space
7476
  req_size_dict = {
7477
    constants.DT_DISKLESS: None,
7478
    constants.DT_PLAIN: sum(d[constants.IDISK_SIZE] for d in disks),
7479
    # 128 MB are added for drbd metadata for each disk
7480
    constants.DT_DRBD8: sum(d[constants.IDISK_SIZE] + 128 for d in disks),
7481
    constants.DT_FILE: None,
7482
    constants.DT_SHARED_FILE: 0,
7483
    constants.DT_BLOCK: 0,
7484
  }
7485

    
7486
  if disk_template not in req_size_dict:
7487
    raise errors.ProgrammerError("Disk template '%s' size requirement"
7488
                                 " is unknown" %  disk_template)
7489

    
7490
  return req_size_dict[disk_template]
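  # Illustrative example: two disks of 1024 MiB and 2048 MiB require
  # 1024 + 2048 = 3072 MiB with DT_PLAIN and 3072 + 2 * 128 = 3328 MiB with
  # DT_DRBD8; file-based, block and diskless templates need no VG space.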


def _FilterVmNodes(lu, nodenames):
7494
  """Filters out non-vm_capable nodes from a list.
7495

7496
  @type lu: L{LogicalUnit}
7497
  @param lu: the logical unit for which we check
7498
  @type nodenames: list
7499
  @param nodenames: the list of nodes on which we should check
7500
  @rtype: list
7501
  @return: the list of vm-capable nodes
7502

7503
  """
7504
  non_vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
  return [name for name in nodenames if name not in non_vm_nodes]
7506

    
7507

    
7508
def _CheckHVParams(lu, nodenames, hvname, hvparams):
7509
  """Hypervisor parameter validation.
7510

7511
  This function abstracts the hypervisor parameter validation to be
7512
  used in both instance create and instance modify.
7513

7514
  @type lu: L{LogicalUnit}
7515
  @param lu: the logical unit for which we check
7516
  @type nodenames: list
7517
  @param nodenames: the list of nodes on which we should check
7518
  @type hvname: string
7519
  @param hvname: the name of the hypervisor we should use
7520
  @type hvparams: dict
7521
  @param hvparams: the parameters which we need to check
7522
  @raise errors.OpPrereqError: if the parameters are not valid
7523

7524
  """
7525
  nodenames = _FilterVmNodes(lu, nodenames)
7526
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
7527
                                                  hvname,
7528
                                                  hvparams)
7529
  for node in nodenames:
7530
    info = hvinfo[node]
7531
    if info.offline:
7532
      continue
7533
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
7534

    
7535

    
7536
def _CheckOSParams(lu, required, nodenames, osname, osparams):
7537
  """OS parameters validation.
7538

7539
  @type lu: L{LogicalUnit}
7540
  @param lu: the logical unit for which we check
7541
  @type required: boolean
7542
  @param required: whether the validation should fail if the OS is not
7543
      found
7544
  @type nodenames: list
7545
  @param nodenames: the list of nodes on which we should check
7546
  @type osname: string
7547
  @param osname: the name of the OS we should use
7548
  @type osparams: dict
7549
  @param osparams: the parameters which we need to check
7550
  @raise errors.OpPrereqError: if the parameters are not valid
7551

7552
  """
7553
  nodenames = _FilterVmNodes(lu, nodenames)
7554
  result = lu.rpc.call_os_validate(required, nodenames, osname,
7555
                                   [constants.OS_VALIDATE_PARAMETERS],
7556
                                   osparams)
7557
  for node, nres in result.items():
7558
    # we don't check for offline cases since this should be run only
7559
    # against the master node and/or an instance's nodes
7560
    nres.Raise("OS Parameters validation failed on node %s" % node)
7561
    if not nres.payload:
7562
      lu.LogInfo("OS %s not found on node %s, validation skipped",
7563
                 osname, node)
7564

    
7565

    
7566
class LUInstanceCreate(LogicalUnit):
7567
  """Create an instance.
7568

7569
  """
7570
  HPATH = "instance-add"
7571
  HTYPE = constants.HTYPE_INSTANCE
7572
  REQ_BGL = False
7573

    
7574
  def CheckArguments(self):
7575
    """Check arguments.
7576

7577
    """
7578
    # do not require name_check to ease forward/backward compatibility
7579
    # for tools
7580
    if self.op.no_install and self.op.start:
7581
      self.LogInfo("No-installation mode selected, disabling startup")
7582
      self.op.start = False
7583
    # validate/normalize the instance name
7584
    self.op.instance_name = \
7585
      netutils.Hostname.GetNormalizedName(self.op.instance_name)
7586

    
7587
    if self.op.ip_check and not self.op.name_check:
7588
      # TODO: make the ip check more flexible and not depend on the name check
7589
      raise errors.OpPrereqError("Cannot do IP address check without a name"
7590
                                 " check", errors.ECODE_INVAL)
7591

    
7592
    # check nics' parameter names
7593
    for nic in self.op.nics:
7594
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
7595

    
7596
    # check disks. parameter names and consistent adopt/no-adopt strategy
7597
    has_adopt = has_no_adopt = False
7598
    for disk in self.op.disks:
7599
      utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
7600
      if constants.IDISK_ADOPT in disk:
7601
        has_adopt = True
7602
      else:
7603
        has_no_adopt = True
7604
    if has_adopt and has_no_adopt:
7605
      raise errors.OpPrereqError("Either all disks are adopted or none is",
7606
                                 errors.ECODE_INVAL)
7607
    if has_adopt:
7608
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
7609
        raise errors.OpPrereqError("Disk adoption is not supported for the"
7610
                                   " '%s' disk template" %
7611
                                   self.op.disk_template,
7612
                                   errors.ECODE_INVAL)
7613
      if self.op.iallocator is not None:
7614
        raise errors.OpPrereqError("Disk adoption not allowed with an"
7615
                                   " iallocator script", errors.ECODE_INVAL)
7616
      if self.op.mode == constants.INSTANCE_IMPORT:
7617
        raise errors.OpPrereqError("Disk adoption not allowed for"
7618
                                   " instance import", errors.ECODE_INVAL)
7619
    else:
7620
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
7621
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
7622
                                   " but no 'adopt' parameter given" %
7623
                                   self.op.disk_template,
7624
                                   errors.ECODE_INVAL)
7625

    
7626
    self.adopt_disks = has_adopt
7627

    
7628
    # instance name verification
7629
    if self.op.name_check:
7630
      self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
7631
      self.op.instance_name = self.hostname1.name
7632
      # used in CheckPrereq for ip ping check
7633
      self.check_ip = self.hostname1.ip
7634
    else:
7635
      self.check_ip = None
7636

    
7637
    # file storage checks
7638
    if (self.op.file_driver and
7639
        not self.op.file_driver in constants.FILE_DRIVER):
7640
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
7641
                                 self.op.file_driver, errors.ECODE_INVAL)
7642

    
7643
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
7644
      raise errors.OpPrereqError("File storage directory path not absolute",
7645
                                 errors.ECODE_INVAL)
7646

    
7647
    ### Node/iallocator related checks
7648
    _CheckIAllocatorOrNode(self, "iallocator", "pnode")
7649

    
7650
    if self.op.pnode is not None:
7651
      if self.op.disk_template in constants.DTS_INT_MIRROR:
7652
        if self.op.snode is None:
7653
          raise errors.OpPrereqError("The networked disk templates need"
7654
                                     " a mirror node", errors.ECODE_INVAL)
7655
      elif self.op.snode:
7656
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
7657
                        " template")
7658
        self.op.snode = None
7659

    
7660
    self._cds = _GetClusterDomainSecret()
7661

    
7662
    if self.op.mode == constants.INSTANCE_IMPORT:
7663
      # On import force_variant must be True, because if we forced it at
7664
      # initial install, our only chance when importing it back is that it
7665
      # works again!
7666
      self.op.force_variant = True
7667

    
7668
      if self.op.no_install:
7669
        self.LogInfo("No-installation mode has no effect during import")
7670

    
7671
    elif self.op.mode == constants.INSTANCE_CREATE:
7672
      if self.op.os_type is None:
7673
        raise errors.OpPrereqError("No guest OS specified",
7674
                                   errors.ECODE_INVAL)
7675
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
7676
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
7677
                                   " installation" % self.op.os_type,
7678
                                   errors.ECODE_STATE)
7679
      if self.op.disk_template is None:
7680
        raise errors.OpPrereqError("No disk template specified",
7681
                                   errors.ECODE_INVAL)
7682

    
7683
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
7684
      # Check handshake to ensure both clusters have the same domain secret
7685
      src_handshake = self.op.source_handshake
7686
      if not src_handshake:
7687
        raise errors.OpPrereqError("Missing source handshake",
7688
                                   errors.ECODE_INVAL)
7689

    
7690
      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
7691
                                                           src_handshake)
7692
      if errmsg:
7693
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
7694
                                   errors.ECODE_INVAL)
7695

    
7696
      # Load and check source CA
7697
      self.source_x509_ca_pem = self.op.source_x509_ca
7698
      if not self.source_x509_ca_pem:
7699
        raise errors.OpPrereqError("Missing source X509 CA",
7700
                                   errors.ECODE_INVAL)
7701

    
7702
      try:
7703
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
7704
                                                    self._cds)
7705
      except OpenSSL.crypto.Error, err:
7706
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
7707
                                   (err, ), errors.ECODE_INVAL)
7708

    
7709
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
7710
      if errcode is not None:
7711
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
7712
                                   errors.ECODE_INVAL)
7713

    
7714
      self.source_x509_ca = cert
7715

    
7716
      src_instance_name = self.op.source_instance_name
7717
      if not src_instance_name:
7718
        raise errors.OpPrereqError("Missing source instance name",
7719
                                   errors.ECODE_INVAL)
7720

    
7721
      self.source_instance_name = \
7722
          netutils.GetHostname(name=src_instance_name).name
7723

    
7724
    else:
7725
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
7726
                                 self.op.mode, errors.ECODE_INVAL)
7727

    
7728
  def ExpandNames(self):
7729
    """ExpandNames for CreateInstance.
7730

7731
    Figure out the right locks for instance creation.
7732

7733
    """
7734
    self.needed_locks = {}
7735

    
7736
    instance_name = self.op.instance_name
7737
    # this is just a preventive check, but someone might still add this
7738
    # instance in the meantime, and creation will fail at lock-add time
7739
    if instance_name in self.cfg.GetInstanceList():
7740
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
7741
                                 instance_name, errors.ECODE_EXISTS)
7742

    
7743
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
7744

    
7745
    if self.op.iallocator:
7746
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7747
    else:
7748
      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
7749
      nodelist = [self.op.pnode]
7750
      if self.op.snode is not None:
7751
        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
7752
        nodelist.append(self.op.snode)
7753
      self.needed_locks[locking.LEVEL_NODE] = nodelist
7754

    
7755
    # in case of import lock the source node too
7756
    if self.op.mode == constants.INSTANCE_IMPORT:
7757
      src_node = self.op.src_node
7758
      src_path = self.op.src_path
7759

    
7760
      if src_path is None:
7761
        self.op.src_path = src_path = self.op.instance_name
7762

    
7763
      if src_node is None:
7764
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7765
        self.op.src_node = None
7766
        if os.path.isabs(src_path):
7767
          raise errors.OpPrereqError("Importing an instance from an absolute"
7768
                                     " path requires a source node option",
7769
                                     errors.ECODE_INVAL)
7770
      else:
7771
        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
7772
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
7773
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
7774
        if not os.path.isabs(src_path):
7775
          self.op.src_path = src_path = \
7776
            utils.PathJoin(constants.EXPORT_DIR, src_path)
7777

    
7778
  def _RunAllocator(self):
7779
    """Run the allocator based on input opcode.
7780

7781
    """
7782
    nics = [n.ToDict() for n in self.nics]
7783
    ial = IAllocator(self.cfg, self.rpc,
7784
                     mode=constants.IALLOCATOR_MODE_ALLOC,
7785
                     name=self.op.instance_name,
7786
                     disk_template=self.op.disk_template,
7787
                     tags=[],
7788
                     os=self.op.os_type,
7789
                     vcpus=self.be_full[constants.BE_VCPUS],
7790
                     mem_size=self.be_full[constants.BE_MEMORY],
7791
                     disks=self.disks,
7792
                     nics=nics,
7793
                     hypervisor=self.op.hypervisor,
7794
                     )
7795

    
7796
    ial.Run(self.op.iallocator)
7797

    
7798
    if not ial.success:
7799
      raise errors.OpPrereqError("Can't compute nodes using"
7800
                                 " iallocator '%s': %s" %
7801
                                 (self.op.iallocator, ial.info),
7802
                                 errors.ECODE_NORES)
7803
    if len(ial.result) != ial.required_nodes:
7804
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
7805
                                 " of nodes (%s), required %s" %
7806
                                 (self.op.iallocator, len(ial.result),
7807
                                  ial.required_nodes), errors.ECODE_FAULT)
7808
    self.op.pnode = ial.result[0]
7809
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
7810
                 self.op.instance_name, self.op.iallocator,
7811
                 utils.CommaJoin(ial.result))
7812
    if ial.required_nodes == 2:
7813
      self.op.snode = ial.result[1]
7814

    
7815
  def BuildHooksEnv(self):
7816
    """Build hooks env.
7817

7818
    This runs on master, primary and secondary nodes of the instance.
7819

7820
    """
7821
    env = {
7822
      "ADD_MODE": self.op.mode,
7823
      }
7824
    if self.op.mode == constants.INSTANCE_IMPORT:
7825
      env["SRC_NODE"] = self.op.src_node
7826
      env["SRC_PATH"] = self.op.src_path
7827
      env["SRC_IMAGES"] = self.src_images
7828

    
7829
    env.update(_BuildInstanceHookEnv(
7830
      name=self.op.instance_name,
7831
      primary_node=self.op.pnode,
7832
      secondary_nodes=self.secondaries,
7833
      status=self.op.start,
7834
      os_type=self.op.os_type,
7835
      memory=self.be_full[constants.BE_MEMORY],
7836
      vcpus=self.be_full[constants.BE_VCPUS],
7837
      nics=_NICListToTuple(self, self.nics),
7838
      disk_template=self.op.disk_template,
7839
      disks=[(d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
7840
             for d in self.disks],
7841
      bep=self.be_full,
7842
      hvp=self.hv_full,
7843
      hypervisor_name=self.op.hypervisor,
7844
    ))
7845

    
7846
    return env
7847

    
7848
  def BuildHooksNodes(self):
7849
    """Build hooks nodes.
7850

7851
    """
7852
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
7853
    return nl, nl
7854

    
7855
  def _ReadExportInfo(self):
7856
    """Reads the export information from disk.
7857

7858
    It will override the opcode source node and path with the actual
7859
    information, if these two were not specified before.
7860

7861
    @return: the export information
7862

7863
    """
7864
    assert self.op.mode == constants.INSTANCE_IMPORT
7865

    
7866
    src_node = self.op.src_node
7867
    src_path = self.op.src_path
7868

    
7869
    if src_node is None:
7870
      locked_nodes = self.glm.list_owned(locking.LEVEL_NODE)
7871
      exp_list = self.rpc.call_export_list(locked_nodes)
7872
      found = False
7873
      for node in exp_list:
7874
        if exp_list[node].fail_msg:
7875
          continue
7876
        if src_path in exp_list[node].payload:
7877
          found = True
7878
          self.op.src_node = src_node = node
7879
          self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
7880
                                                       src_path)
7881
          break
7882
      if not found:
7883
        raise errors.OpPrereqError("No export found for relative path %s" %
7884
                                    src_path, errors.ECODE_INVAL)
7885

    
7886
    _CheckNodeOnline(self, src_node)
7887
    result = self.rpc.call_export_info(src_node, src_path)
7888
    result.Raise("No export or invalid export found in dir %s" % src_path)
7889

    
7890
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
7891
    if not export_info.has_section(constants.INISECT_EXP):
7892
      raise errors.ProgrammerError("Corrupted export config",
7893
                                   errors.ECODE_ENVIRON)
7894

    
7895
    ei_version = export_info.get(constants.INISECT_EXP, "version")
7896
    if (int(ei_version) != constants.EXPORT_VERSION):
7897
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
7898
                                 (ei_version, constants.EXPORT_VERSION),
7899
                                 errors.ECODE_ENVIRON)
7900
    return export_info
7901

    
7902
  def _ReadExportParams(self, einfo):
7903
    """Use export parameters as defaults.
7904

7905
    In case the opcode doesn't specify (as in override) some instance
7906
    parameters, then try to use them from the export information, if
7907
    that declares them.
7908

7909
    """
7910
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
7911

    
7912
    if self.op.disk_template is None:
7913
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
7914
        self.op.disk_template = einfo.get(constants.INISECT_INS,
7915
                                          "disk_template")
7916
      else:
7917
        raise errors.OpPrereqError("No disk template specified and the export"
7918
                                   " is missing the disk_template information",
7919
                                   errors.ECODE_INVAL)
7920

    
7921
    if not self.op.disks:
7922
      if einfo.has_option(constants.INISECT_INS, "disk_count"):
7923
        disks = []
7924
        # TODO: import the disk iv_name too
7925
        for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
7926
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
7927
          disks.append({constants.IDISK_SIZE: disk_sz})
7928
        self.op.disks = disks
7929
      else:
7930
        raise errors.OpPrereqError("No disk info specified and the export"
7931
                                   " is missing the disk information",
7932
                                   errors.ECODE_INVAL)
7933

    
7934
    if (not self.op.nics and
7935
        einfo.has_option(constants.INISECT_INS, "nic_count")):
7936
      nics = []
7937
      for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
7938
        ndict = {}
7939
        for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
7940
          v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
7941
          ndict[name] = v
7942
        nics.append(ndict)
7943
      self.op.nics = nics
7944

    
7945
    if (self.op.hypervisor is None and
7946
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
7947
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
7948
    if einfo.has_section(constants.INISECT_HYP):
7949
      # use the export parameters but do not override the ones
7950
      # specified by the user
7951
      for name, value in einfo.items(constants.INISECT_HYP):
7952
        if name not in self.op.hvparams:
7953
          self.op.hvparams[name] = value
7954

    
7955
    if einfo.has_section(constants.INISECT_BEP):
7956
      # use the parameters, without overriding
7957
      for name, value in einfo.items(constants.INISECT_BEP):
7958
        if name not in self.op.beparams:
7959
          self.op.beparams[name] = value
7960
    else:
7961
      # try to read the parameters old style, from the main section
7962
      for name in constants.BES_PARAMETERS:
7963
        if (name not in self.op.beparams and
7964
            einfo.has_option(constants.INISECT_INS, name)):
7965
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
7966

    
7967
    if einfo.has_section(constants.INISECT_OSP):
7968
      # use the parameters, without overriding
7969
      for name, value in einfo.items(constants.INISECT_OSP):
7970
        if name not in self.op.osparams:
7971
          self.op.osparams[name] = value
7972

    
7973
  def _RevertToDefaults(self, cluster):
7974
    """Revert the instance parameters to the default values.
7975

7976
    """
7977
    # hvparams
7978
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
7979
    for name in self.op.hvparams.keys():
7980
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
7981
        del self.op.hvparams[name]
7982
    # beparams
7983
    be_defs = cluster.SimpleFillBE({})
7984
    for name in self.op.beparams.keys():
7985
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
7986
        del self.op.beparams[name]
7987
    # nic params
7988
    nic_defs = cluster.SimpleFillNIC({})
7989
    for nic in self.op.nics:
7990
      for name in constants.NICS_PARAMETERS:
7991
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
7992
          del nic[name]
7993
    # osparams
7994
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
7995
    for name in self.op.osparams.keys():
7996
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
7997
        del self.op.osparams[name]
7998

    
7999
  def CheckPrereq(self):
8000
    """Check prerequisites.
8001

8002
    """
8003
    if self.op.mode == constants.INSTANCE_IMPORT:
8004
      export_info = self._ReadExportInfo()
8005
      self._ReadExportParams(export_info)
8006

    
8007
    if (not self.cfg.GetVGName() and
8008
        self.op.disk_template not in constants.DTS_NOT_LVM):
8009
      raise errors.OpPrereqError("Cluster does not support lvm-based"
8010
                                 " instances", errors.ECODE_STATE)
8011

    
8012
    if self.op.hypervisor is None:
8013
      self.op.hypervisor = self.cfg.GetHypervisorType()
8014

    
8015
    cluster = self.cfg.GetClusterInfo()
8016
    enabled_hvs = cluster.enabled_hypervisors
8017
    if self.op.hypervisor not in enabled_hvs:
8018
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
8019
                                 " cluster (%s)" % (self.op.hypervisor,
8020
                                  ",".join(enabled_hvs)),
8021
                                 errors.ECODE_STATE)
8022

    
8023
    # check hypervisor parameter syntax (locally)
8024
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
8025
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
8026
                                      self.op.hvparams)
8027
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
8028
    hv_type.CheckParameterSyntax(filled_hvp)
8029
    self.hv_full = filled_hvp
8030
    # check that we don't specify global parameters on an instance
8031
    _CheckGlobalHvParams(self.op.hvparams)
8032

    
8033
    # fill and remember the beparams dict
8034
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
8035
    self.be_full = cluster.SimpleFillBE(self.op.beparams)
8036

    
8037
    # build os parameters
8038
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
8039

    
8040
    # now that hvp/bep are in final format, let's reset to defaults,
8041
    # if told to do so
8042
    if self.op.identify_defaults:
8043
      self._RevertToDefaults(cluster)
8044

    
8045
    # NIC buildup
8046
    self.nics = []
8047
    for idx, nic in enumerate(self.op.nics):
8048
      nic_mode_req = nic.get(constants.INIC_MODE, None)
8049
      nic_mode = nic_mode_req
8050
      if nic_mode is None:
8051
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
8052

    
8053
      # in routed mode, for the first nic, the default ip is 'auto'
8054
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
8055
        default_ip_mode = constants.VALUE_AUTO
8056
      else:
8057
        default_ip_mode = constants.VALUE_NONE
8058

    
8059
      # ip validity checks
8060
      ip = nic.get(constants.INIC_IP, default_ip_mode)
8061
      if ip is None or ip.lower() == constants.VALUE_NONE:
8062
        nic_ip = None
8063
      elif ip.lower() == constants.VALUE_AUTO:
8064
        if not self.op.name_check:
8065
          raise errors.OpPrereqError("IP address set to auto but name checks"
8066
                                     " have been skipped",
8067
                                     errors.ECODE_INVAL)
8068
        nic_ip = self.hostname1.ip
8069
      else:
8070
        if not netutils.IPAddress.IsValid(ip):
8071
          raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
8072
                                     errors.ECODE_INVAL)
8073
        nic_ip = ip
8074

    
8075
      # TODO: check the ip address for uniqueness
8076
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
8077
        raise errors.OpPrereqError("Routed nic mode requires an ip address",
8078
                                   errors.ECODE_INVAL)
8079

    
8080
      # MAC address verification
8081
      mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
8082
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8083
        mac = utils.NormalizeAndValidateMac(mac)
8084

    
8085
        try:
8086
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
8087
        except errors.ReservationError:
8088
          raise errors.OpPrereqError("MAC address %s already in use"
8089
                                     " in cluster" % mac,
8090
                                     errors.ECODE_NOTUNIQUE)
8091

    
8092
      #  Build nic parameters
8093
      link = nic.get(constants.INIC_LINK, None)
8094
      nicparams = {}
8095
      if nic_mode_req:
8096
        nicparams[constants.NIC_MODE] = nic_mode_req
8097
      if link:
8098
        nicparams[constants.NIC_LINK] = link
8099

    
8100
      check_params = cluster.SimpleFillNIC(nicparams)
8101
      objects.NIC.CheckParameterSyntax(check_params)
8102
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
8103

    
8104
    # disk checks/pre-build
8105
    default_vg = self.cfg.GetVGName()
8106
    self.disks = []
8107
    for disk in self.op.disks:
8108
      mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
8109
      if mode not in constants.DISK_ACCESS_SET:
8110
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
8111
                                   mode, errors.ECODE_INVAL)
8112
      size = disk.get(constants.IDISK_SIZE, None)
8113
      if size is None:
8114
        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
8115
      try:
8116
        size = int(size)
8117
      except (TypeError, ValueError):
8118
        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
8119
                                   errors.ECODE_INVAL)
8120

    
8121
      data_vg = disk.get(constants.IDISK_VG, default_vg)
8122
      new_disk = {
8123
        constants.IDISK_SIZE: size,
8124
        constants.IDISK_MODE: mode,
8125
        constants.IDISK_VG: data_vg,
8126
        constants.IDISK_METAVG: disk.get(constants.IDISK_METAVG, data_vg),
8127
        }
8128
      if constants.IDISK_ADOPT in disk:
8129
        new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
8130
      self.disks.append(new_disk)
8131

    
8132
    if self.op.mode == constants.INSTANCE_IMPORT:
8133

    
8134
      # Check that the new instance doesn't have less disks than the export
8135
      instance_disks = len(self.disks)
8136
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
8137
      if instance_disks < export_disks:
8138
        raise errors.OpPrereqError("Not enough disks to import."
8139
                                   " (instance: %d, export: %d)" %
8140
                                   (instance_disks, export_disks),
8141
                                   errors.ECODE_INVAL)
8142

    
8143
      disk_images = []
8144
      for idx in range(export_disks):
8145
        option = 'disk%d_dump' % idx
8146
        if export_info.has_option(constants.INISECT_INS, option):
8147
          # FIXME: are the old os-es, disk sizes, etc. useful?
8148
          export_name = export_info.get(constants.INISECT_INS, option)
8149
          image = utils.PathJoin(self.op.src_path, export_name)
8150
          disk_images.append(image)
8151
        else:
8152
          disk_images.append(False)
8153

    
8154
      self.src_images = disk_images
8155

    
8156
      old_name = export_info.get(constants.INISECT_INS, 'name')
8157
      try:
8158
        exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
8159
      except (TypeError, ValueError), err:
8160
        raise errors.OpPrereqError("Invalid export file, nic_count is not"
8161
                                   " an integer: %s" % str(err),
8162
                                   errors.ECODE_STATE)
8163
      if self.op.instance_name == old_name:
8164
        for idx, nic in enumerate(self.nics):
8165
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
8166
            nic_mac_ini = 'nic%d_mac' % idx
8167
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
8168

    
8169
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
8170

    
8171
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
8172
    if self.op.ip_check:
8173
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
8174
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
8175
                                   (self.check_ip, self.op.instance_name),
8176
                                   errors.ECODE_NOTUNIQUE)
8177

    
8178
    #### mac address generation
8179
    # By generating here the mac address both the allocator and the hooks get
8180
    # the real final mac address rather than the 'auto' or 'generate' value.
8181
    # There is a race condition between the generation and the instance object
8182
    # creation, which means that we know the mac is valid now, but we're not
8183
    # sure it will be when we actually add the instance. If things go bad
8184
    # adding the instance will abort because of a duplicate mac, and the
8185
    # creation job will fail.
8186
    for nic in self.nics:
8187
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8188
        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
8189

    
8190
    #### allocator run
8191

    
8192
    if self.op.iallocator is not None:
8193
      self._RunAllocator()
8194

    
8195
    #### node related checks
8196

    
8197
    # check primary node
8198
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
8199
    assert self.pnode is not None, \
8200
      "Cannot retrieve locked node %s" % self.op.pnode
8201
    if pnode.offline:
8202
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
8203
                                 pnode.name, errors.ECODE_STATE)
8204
    if pnode.drained:
8205
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
8206
                                 pnode.name, errors.ECODE_STATE)
8207
    if not pnode.vm_capable:
8208
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
8209
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
8210

    
8211
    self.secondaries = []
8212

    
8213
    # mirror node verification
8214
    if self.op.disk_template in constants.DTS_INT_MIRROR:
8215
      if self.op.snode == pnode.name:
8216
        raise errors.OpPrereqError("The secondary node cannot be the"
8217
                                   " primary node", errors.ECODE_INVAL)
8218
      _CheckNodeOnline(self, self.op.snode)
8219
      _CheckNodeNotDrained(self, self.op.snode)
8220
      _CheckNodeVmCapable(self, self.op.snode)
8221
      self.secondaries.append(self.op.snode)
8222

    
8223
    nodenames = [pnode.name] + self.secondaries
8224

    
8225
    if not self.adopt_disks:
8226
      # Check lv size requirements, if not adopting
8227
      req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
8228
      _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
8229

    
8230
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
8231
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
8232
                                disk[constants.IDISK_ADOPT])
8233
                     for disk in self.disks])
8234
      if len(all_lvs) != len(self.disks):
8235
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
8236
                                   errors.ECODE_INVAL)
8237
      for lv_name in all_lvs:
8238
        try:
8239
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
8240
          # to ReserveLV uses the same syntax
8241
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
8242
        except errors.ReservationError:
8243
          raise errors.OpPrereqError("LV named %s used by another instance" %
8244
                                     lv_name, errors.ECODE_NOTUNIQUE)
8245

    
8246
      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
8247
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
8248

    
8249
      node_lvs = self.rpc.call_lv_list([pnode.name],
8250
                                       vg_names.payload.keys())[pnode.name]
8251
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
8252
      node_lvs = node_lvs.payload
8253

    
8254
      delta = all_lvs.difference(node_lvs.keys())
8255
      if delta:
8256
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
8257
                                   utils.CommaJoin(delta),
8258
                                   errors.ECODE_INVAL)
8259
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
8260
      if online_lvs:
8261
        raise errors.OpPrereqError("Online logical volumes found, cannot"
8262
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
8263
                                   errors.ECODE_STATE)
8264
      # update the size of disk based on what is found
8265
      for dsk in self.disks:
8266
        dsk[constants.IDISK_SIZE] = \
8267
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
8268
                                        dsk[constants.IDISK_ADOPT])][0]))
8269

    
8270
    elif self.op.disk_template == constants.DT_BLOCK:
8271
      # Normalize and de-duplicate device paths
8272
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
8273
                       for disk in self.disks])
8274
      if len(all_disks) != len(self.disks):
8275
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
8276
                                   errors.ECODE_INVAL)
8277
      baddisks = [d for d in all_disks
8278
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
8279
      if baddisks:
8280
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
8281
                                   " cannot be adopted" %
8282
                                   (", ".join(baddisks),
8283
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
8284
                                   errors.ECODE_INVAL)
8285

    
8286
      node_disks = self.rpc.call_bdev_sizes([pnode.name],
8287
                                            list(all_disks))[pnode.name]
8288
      node_disks.Raise("Cannot get block device information from node %s" %
8289
                       pnode.name)
8290
      node_disks = node_disks.payload
8291
      delta = all_disks.difference(node_disks.keys())
8292
      if delta:
8293
        raise errors.OpPrereqError("Missing block device(s): %s" %
8294
                                   utils.CommaJoin(delta),
8295
                                   errors.ECODE_INVAL)
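      # as in the LVM adoption branch above, take over the sizes reported
      # by the node (assumed to be in MiB) as the configured disk sizes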
8296
      for dsk in self.disks:
8297
        dsk[constants.IDISK_SIZE] = \
8298
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
8299

    
8300
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
8301

    
8302
    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
8303
    # check OS parameters (remotely)
8304
    _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
8305

    
8306
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
8307

    
8308
    # memory check on primary node
8309
    if self.op.start:
8310
      _CheckNodeFreeMemory(self, self.pnode.name,
8311
                           "creating instance %s" % self.op.instance_name,
8312
                           self.be_full[constants.BE_MEMORY],
8313
                           self.op.hypervisor)
8314

    
8315
    self.dry_run_result = list(nodenames)
8316

    
8317
  def Exec(self, feedback_fn):
8318
    """Create and add the instance to the cluster.
8319

8320
    """
8321
    instance = self.op.instance_name
8322
    pnode_name = self.pnode.name
8323

    
8324
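    # some hypervisors need a cluster-wide unique network port reserved
    # for the instance (typically for the remote console); those are the
    # ones listed in HTS_REQ_PORT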
    ht_kind = self.op.hypervisor
8325
    if ht_kind in constants.HTS_REQ_PORT:
8326
      network_port = self.cfg.AllocatePort()
8327
    else:
8328
      network_port = None
8329

    
8330
    if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
8331
      # this is needed because os.path.join does not accept None arguments
8332
      if self.op.file_storage_dir is None:
8333
        string_file_storage_dir = ""
8334
      else:
8335
        string_file_storage_dir = self.op.file_storage_dir
8336

    
8337
      # build the full file storage dir path
8338
      if self.op.disk_template == constants.DT_SHARED_FILE:
8339
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
8340
      else:
8341
        get_fsd_fn = self.cfg.GetFileStorageDir
8342

    
8343
      file_storage_dir = utils.PathJoin(get_fsd_fn(),
8344
                                        string_file_storage_dir, instance)
8345
    else:
8346
      file_storage_dir = ""
8347

    
8348
    disks = _GenerateDiskTemplate(self,
8349
                                  self.op.disk_template,
8350
                                  instance, pnode_name,
8351
                                  self.secondaries,
8352
                                  self.disks,
8353
                                  file_storage_dir,
8354
                                  self.op.file_driver,
8355
                                  0,
8356
                                  feedback_fn)
8357

    
8358
    iobj = objects.Instance(name=instance, os=self.op.os_type,
8359
                            primary_node=pnode_name,
8360
                            nics=self.nics, disks=disks,
8361
                            disk_template=self.op.disk_template,
8362
                            admin_up=False,
8363
                            network_port=network_port,
8364
                            beparams=self.op.beparams,
8365
                            hvparams=self.op.hvparams,
8366
                            hypervisor=self.op.hypervisor,
8367
                            osparams=self.op.osparams,
8368
                            )
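    # note that the instance is created with admin_up=False; it is only
    # marked as up (and actually started) at the end of Exec if
    # self.op.start is set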
8369

    
8370
    if self.adopt_disks:
8371
      if self.op.disk_template == constants.DT_PLAIN:
8372
        # rename LVs to the newly-generated names; we need to construct
8373
        # 'fake' LV disks with the old data, plus the new unique_id
8374
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
8375
        rename_to = []
8376
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
8377
          rename_to.append(t_dsk.logical_id)
8378
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
8379
          self.cfg.SetDiskID(t_dsk, pnode_name)
8380
        result = self.rpc.call_blockdev_rename(pnode_name,
8381
                                               zip(tmp_disks, rename_to))
8382
        result.Raise("Failed to rename adoped LVs")
8383
    else:
8384
      feedback_fn("* creating instance disks...")
8385
      try:
8386
        _CreateDisks(self, iobj)
8387
      except errors.OpExecError:
8388
        self.LogWarning("Device creation failed, reverting...")
8389
        try:
8390
          _RemoveDisks(self, iobj)
8391
        finally:
8392
          self.cfg.ReleaseDRBDMinors(instance)
8393
          raise
8394

    
8395
    feedback_fn("adding instance %s to cluster config" % instance)
8396

    
8397
    self.cfg.AddInstance(iobj, self.proc.GetECId())
8398

    
8399
    # Declare that we don't want to remove the instance lock anymore, as we've
8400
    # added the instance to the config
8401
    del self.remove_locks[locking.LEVEL_INSTANCE]
8402

    
8403
    if self.op.mode == constants.INSTANCE_IMPORT:
8404
      # Release unused nodes
8405
      _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
8406
    else:
8407
      # Release all nodes
8408
      _ReleaseLocks(self, locking.LEVEL_NODE)
8409

    
8410
    disk_abort = False
8411
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
8412
      feedback_fn("* wiping instance disks...")
8413
      try:
8414
        _WipeDisks(self, iobj)
8415
      except errors.OpExecError, err:
8416
        logging.exception("Wiping disks failed")
8417
        self.LogWarning("Wiping instance disks failed (%s)", err)
8418
        disk_abort = True
8419

    
8420
    if disk_abort:
8421
      # Something is already wrong with the disks, don't do anything else
8422
      pass
8423
    elif self.op.wait_for_sync:
8424
      disk_abort = not _WaitForSync(self, iobj)
8425
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
8426
      # make sure the disks are not degraded (still sync-ing is ok)
8427
      time.sleep(15)
8428
      feedback_fn("* checking mirrors status")
8429
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
8430
    else:
8431
      disk_abort = False
8432

    
8433
    if disk_abort:
8434
      _RemoveDisks(self, iobj)
8435
      self.cfg.RemoveInstance(iobj.name)
8436
      # Make sure the instance lock gets removed
8437
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
8438
      raise errors.OpExecError("There are some degraded disks for"
8439
                               " this instance")
8440

    
8441
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
8442
      if self.op.mode == constants.INSTANCE_CREATE:
8443
        if not self.op.no_install:
8444
          feedback_fn("* running the instance OS create scripts...")
8445
          # FIXME: pass debug option from opcode to backend
8446
          result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
8447
                                                 self.op.debug_level)
8448
          result.Raise("Could not add os for instance %s"
8449
                       " on node %s" % (instance, pnode_name))
8450

    
8451
      elif self.op.mode == constants.INSTANCE_IMPORT:
8452
        feedback_fn("* running the instance OS import scripts...")
8453

    
8454
        transfers = []
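        # build one DiskTransfer per exported dump file: the file
        # (IEIO_FILE) is streamed through the OS import script
        # (IEIO_SCRIPT) onto the matching newly-created disk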
8455

    
8456
        for idx, image in enumerate(self.src_images):
8457
          if not image:
8458
            continue
8459

    
8460
          # FIXME: pass debug option from opcode to backend
8461
          dt = masterd.instance.DiskTransfer("disk/%s" % idx,
8462
                                             constants.IEIO_FILE, (image, ),
8463
                                             constants.IEIO_SCRIPT,
8464
                                             (iobj.disks[idx], idx),
8465
                                             None)
8466
          transfers.append(dt)
8467

    
8468
        import_result = \
8469
          masterd.instance.TransferInstanceData(self, feedback_fn,
8470
                                                self.op.src_node, pnode_name,
8471
                                                self.pnode.secondary_ip,
8472
                                                iobj, transfers)
8473
        if not compat.all(import_result):
8474
          self.LogWarning("Some disks for instance %s on node %s were not"
8475
                          " imported successfully" % (instance, pnode_name))
8476

    
8477
      elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
8478
        feedback_fn("* preparing remote import...")
8479
        # The source cluster will stop the instance before attempting to make a
8480
        # connection. In some cases stopping an instance can take a long time,
8481
        # hence the shutdown timeout is added to the connection timeout.
8482
        connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
8483
                           self.op.source_shutdown_timeout)
8484
        timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
8485

    
8486
        assert iobj.primary_node == self.pnode.name
8487
        disk_results = \
8488
          masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
8489
                                        self.source_x509_ca,
8490
                                        self._cds, timeouts)
8491
        if not compat.all(disk_results):
8492
          # TODO: Should the instance still be started, even if some disks
8493
          # failed to import (valid for local imports, too)?
8494
          self.LogWarning("Some disks for instance %s on node %s were not"
8495
                          " imported successfully" % (instance, pnode_name))
8496

    
8497
        # Run rename script on newly imported instance
8498
        assert iobj.name == instance
8499
        feedback_fn("Running rename script for %s" % instance)
8500
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
8501
                                                   self.source_instance_name,
8502
                                                   self.op.debug_level)
8503
        if result.fail_msg:
8504
          self.LogWarning("Failed to run rename script for %s on node"
8505
                          " %s: %s" % (instance, pnode_name, result.fail_msg))
8506

    
8507
      else:
8508
        # also checked in the prereq part
8509
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
8510
                                     % self.op.mode)
8511

    
8512
    if self.op.start:
8513
      iobj.admin_up = True
8514
      self.cfg.Update(iobj, feedback_fn)
8515
      logging.info("Starting instance %s on node %s", instance, pnode_name)
8516
      feedback_fn("* starting instance...")
8517
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
8518
      result.Raise("Could not start instance")
8519

    
8520
    return list(iobj.all_nodes)
8521

    
8522

    
8523
class LUInstanceConsole(NoHooksLU):
8524
  """Connect to an instance's console.
8525

8526
  This is somewhat special in that it returns the command line that
8527
  you need to run on the master node in order to connect to the
8528
  console.
8529

8530
  """
8531
  REQ_BGL = False
8532

    
8533
  def ExpandNames(self):
8534
    self._ExpandAndLockInstance()
8535

    
8536
  def CheckPrereq(self):
8537
    """Check prerequisites.
8538

8539
    This checks that the instance is in the cluster.
8540

8541
    """
8542
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
8543
    assert self.instance is not None, \
8544
      "Cannot retrieve locked instance %s" % self.op.instance_name
8545
    _CheckNodeOnline(self, self.instance.primary_node)
8546

    
8547
  def Exec(self, feedback_fn):
8548
    """Connect to the console of an instance
8549

8550
    """
8551
    instance = self.instance
8552
    node = instance.primary_node
8553

    
8554
    node_insts = self.rpc.call_instance_list([node],
8555
                                             [instance.hypervisor])[node]
8556
    node_insts.Raise("Can't get node information from %s" % node)
8557

    
8558
    if instance.name not in node_insts.payload:
8559
      if instance.admin_up:
8560
        state = constants.INSTST_ERRORDOWN
8561
      else:
8562
        state = constants.INSTST_ADMINDOWN
8563
      raise errors.OpExecError("Instance %s is not running (state %s)" %
8564
                               (instance.name, state))
8565

    
8566
    logging.debug("Connecting to console of %s on %s", instance.name, node)
8567

    
8568
    return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
8569

    
8570

    
8571
def _GetInstanceConsole(cluster, instance):
8572
  """Returns console information for an instance.
8573

8574
  @type cluster: L{objects.Cluster}
8575
  @type instance: L{objects.Instance}
8576
  @rtype: dict
8577

8578
  """
8579
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
8580
  # beparams and hvparams are passed separately, to avoid editing the
8581
  # instance and then saving the defaults in the instance itself.
8582
  hvparams = cluster.FillHV(instance)
8583
  beparams = cluster.FillBE(instance)
8584
  console = hyper.GetInstanceConsole(instance, hvparams, beparams)
8585

    
8586
  assert console.instance == instance.name
8587
  assert console.Validate()
8588

    
8589
  return console.ToDict()
8590

    
8591

    
8592
class LUInstanceReplaceDisks(LogicalUnit):
8593
  """Replace the disks of an instance.
8594

8595
  """
8596
  HPATH = "mirrors-replace"
8597
  HTYPE = constants.HTYPE_INSTANCE
8598
  REQ_BGL = False
8599

    
8600
  def CheckArguments(self):
8601
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
8602
                                  self.op.iallocator)
8603

    
8604
  def ExpandNames(self):
8605
    self._ExpandAndLockInstance()
8606

    
8607
    assert locking.LEVEL_NODE not in self.needed_locks
8608
    assert locking.LEVEL_NODEGROUP not in self.needed_locks
8609

    
8610
    assert self.op.iallocator is None or self.op.remote_node is None, \
8611
      "Conflicting options"
8612

    
8613
    if self.op.remote_node is not None:
8614
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8615

    
8616
      # Warning: do not remove the locking of the new secondary here
8617
      # unless DRBD8.AddChildren is changed to work in parallel;
8618
      # currently it doesn't since parallel invocations of
8619
      # FindUnusedMinor will conflict
8620
      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
8621
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
8622
    else:
8623
      self.needed_locks[locking.LEVEL_NODE] = []
8624
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8625

    
8626
      if self.op.iallocator is not None:
8627
        # iallocator will select a new node in the same group
8628
        self.needed_locks[locking.LEVEL_NODEGROUP] = []
8629

    
8630
    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
8631
                                   self.op.iallocator, self.op.remote_node,
8632
                                   self.op.disks, False, self.op.early_release)
8633

    
8634
    self.tasklets = [self.replacer]
8635

    
8636
  def DeclareLocks(self, level):
8637
    if level == locking.LEVEL_NODEGROUP:
8638
      assert self.op.remote_node is None
8639
      assert self.op.iallocator is not None
8640
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
8641

    
8642
      self.share_locks[locking.LEVEL_NODEGROUP] = 1
8643
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
8644
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
8645

    
8646
    elif level == locking.LEVEL_NODE:
8647
      if self.op.iallocator is not None:
8648
        assert self.op.remote_node is None
8649
        assert not self.needed_locks[locking.LEVEL_NODE]
8650

    
8651
        # Lock member nodes of all locked groups
8652
        self.needed_locks[locking.LEVEL_NODE] = [node_name
8653
          for group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
8654
          for node_name in self.cfg.GetNodeGroup(group_uuid).members]
8655
      else:
8656
        self._LockInstancesNodes()
8657

    
8658
  def BuildHooksEnv(self):
8659
    """Build hooks env.
8660

8661
    This runs on the master, the primary and all the secondaries.
8662

8663
    """
8664
    instance = self.replacer.instance
8665
    env = {
8666
      "MODE": self.op.mode,
8667
      "NEW_SECONDARY": self.op.remote_node,
8668
      "OLD_SECONDARY": instance.secondary_nodes[0],
8669
      }
8670
    env.update(_BuildInstanceHookEnvByObject(self, instance))
8671
    return env
8672

    
8673
  def BuildHooksNodes(self):
8674
    """Build hooks nodes.
8675

8676
    """
8677
    instance = self.replacer.instance
8678
    nl = [
8679
      self.cfg.GetMasterNode(),
8680
      instance.primary_node,
8681
      ]
8682
    if self.op.remote_node is not None:
8683
      nl.append(self.op.remote_node)
8684
    return nl, nl
8685

    
8686
  def CheckPrereq(self):
8687
    """Check prerequisites.
8688

8689
    """
8690
    assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
8691
            self.op.iallocator is None)
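    # if node groups were locked (the iallocator case), verify that the
    # instance's groups have not changed since the locks were acquired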
8692

    
8693
    owned_groups = self.glm.list_owned(locking.LEVEL_NODEGROUP)
8694
    if owned_groups:
8695
      groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
8696
      if owned_groups != groups:
8697
        raise errors.OpExecError("Node groups used by instance '%s' changed"
8698
                                 " since lock was acquired, current list is %r,"
8699
                                 " used to be '%s'" %
8700
                                 (self.op.instance_name,
8701
                                  utils.CommaJoin(groups),
8702
                                  utils.CommaJoin(owned_groups)))
8703

    
8704
    return LogicalUnit.CheckPrereq(self)
8705

    
8706

    
8707
class TLReplaceDisks(Tasklet):
8708
  """Replaces disks for an instance.
8709

8710
  Note: Locking is not within the scope of this class.
8711

8712
  """
8713
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
8714
               disks, delay_iallocator, early_release):
8715
    """Initializes this class.
8716

8717
    """
8718
    Tasklet.__init__(self, lu)
8719

    
8720
    # Parameters
8721
    self.instance_name = instance_name
8722
    self.mode = mode
8723
    self.iallocator_name = iallocator_name
8724
    self.remote_node = remote_node
8725
    self.disks = disks
8726
    self.delay_iallocator = delay_iallocator
8727
    self.early_release = early_release
8728

    
8729
    # Runtime data
8730
    self.instance = None
8731
    self.new_node = None
8732
    self.target_node = None
8733
    self.other_node = None
8734
    self.remote_node_info = None
8735
    self.node_secondary_ip = None
8736

    
8737
  @staticmethod
8738
  def CheckArguments(mode, remote_node, iallocator):
8739
    """Helper function for users of this class.
8740

8741
    """
8742
    # check for valid parameter combination
8743
    if mode == constants.REPLACE_DISK_CHG:
8744
      if remote_node is None and iallocator is None:
8745
        raise errors.OpPrereqError("When changing the secondary either an"
8746
                                   " iallocator script must be used or the"
8747
                                   " new node given", errors.ECODE_INVAL)
8748

    
8749
      if remote_node is not None and iallocator is not None:
8750
        raise errors.OpPrereqError("Give either the iallocator or the new"
8751
                                   " secondary, not both", errors.ECODE_INVAL)
8752

    
8753
    elif remote_node is not None or iallocator is not None:
8754
      # Not replacing the secondary
8755
      raise errors.OpPrereqError("The iallocator and new node options can"
8756
                                 " only be used when changing the"
8757
                                 " secondary node", errors.ECODE_INVAL)
8758

    
8759
  @staticmethod
8760
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
8761
    """Compute a new secondary node using an IAllocator.
8762

8763
    """
8764
    ial = IAllocator(lu.cfg, lu.rpc,
8765
                     mode=constants.IALLOCATOR_MODE_RELOC,
8766
                     name=instance_name,
8767
                     relocate_from=relocate_from)
8768

    
8769
    ial.Run(iallocator_name)
8770

    
8771
    if not ial.success:
8772
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
8773
                                 " %s" % (iallocator_name, ial.info),
8774
                                 errors.ECODE_NORES)
8775

    
8776
    if len(ial.result) != ial.required_nodes:
8777
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
8778
                                 " of nodes (%s), required %s" %
8779
                                 (iallocator_name,
8780
                                  len(ial.result), ial.required_nodes),
8781
                                 errors.ECODE_FAULT)
8782

    
8783
    remote_node_name = ial.result[0]
8784

    
8785
    lu.LogInfo("Selected new secondary for instance '%s': %s",
8786
               instance_name, remote_node_name)
8787

    
8788
    return remote_node_name
8789

    
8790
  def _FindFaultyDisks(self, node_name):
8791
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
8792
                                    node_name, True)
8793

    
8794
  def _CheckDisksActivated(self, instance):
8795
    """Checks if the instance disks are activated.
8796

8797
    @param instance: The instance to check disks
8798
    @return: True if they are activated, False otherwise
8799

8800
    """
8801
    nodes = instance.all_nodes
8802

    
8803
    for idx, dev in enumerate(instance.disks):
8804
      for node in nodes:
8805
        self.lu.LogInfo("Checking disk/%d on %s", idx, node)
8806
        self.cfg.SetDiskID(dev, node)
8807

    
8808
        result = self.rpc.call_blockdev_find(node, dev)
8809

    
8810
        if result.offline:
8811
          continue
8812
        elif result.fail_msg or not result.payload:
8813
          return False
8814

    
8815
    return True
8816

    
8817
  def CheckPrereq(self):
8818
    """Check prerequisites.
8819

8820
    This checks that the instance is in the cluster.
8821

8822
    """
8823
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
8824
    assert instance is not None, \
8825
      "Cannot retrieve locked instance %s" % self.instance_name
8826

    
8827
    if instance.disk_template != constants.DT_DRBD8:
8828
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
8829
                                 " instances", errors.ECODE_INVAL)
8830

    
8831
    if len(instance.secondary_nodes) != 1:
8832
      raise errors.OpPrereqError("The instance has a strange layout,"
8833
                                 " expected one secondary but found %d" %
8834
                                 len(instance.secondary_nodes),
8835
                                 errors.ECODE_FAULT)
8836

    
8837
    if not self.delay_iallocator:
8838
      self._CheckPrereq2()
8839

    
8840
  def _CheckPrereq2(self):
8841
    """Check prerequisites, second part.
8842

8843
    This function should always be part of CheckPrereq. It was separated out
8844
    and is now called from Exec because, during node evacuation, the
8845
    iallocator would otherwise only be called with an unmodified cluster
8846
    model, not taking planned changes into account.
8847

8848
    """
8849
    instance = self.instance
8850
    secondary_node = instance.secondary_nodes[0]
8851

    
8852
    if self.iallocator_name is None:
8853
      remote_node = self.remote_node
8854
    else:
8855
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
8856
                                       instance.name, instance.secondary_nodes)
8857

    
8858
    if remote_node is None:
8859
      self.remote_node_info = None
8860
    else:
8861
      assert remote_node in self.lu.glm.list_owned(locking.LEVEL_NODE), \
8862
             "Remote node '%s' is not locked" % remote_node
8863

    
8864
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
8865
      assert self.remote_node_info is not None, \
8866
        "Cannot retrieve locked node %s" % remote_node
8867

    
8868
    if remote_node == self.instance.primary_node:
8869
      raise errors.OpPrereqError("The specified node is the primary node of"
8870
                                 " the instance", errors.ECODE_INVAL)
8871

    
8872
    if remote_node == secondary_node:
8873
      raise errors.OpPrereqError("The specified node is already the"
8874
                                 " secondary node of the instance",
8875
                                 errors.ECODE_INVAL)
8876

    
8877
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
8878
                                    constants.REPLACE_DISK_CHG):
8879
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
8880
                                 errors.ECODE_INVAL)
8881

    
8882
    if self.mode == constants.REPLACE_DISK_AUTO:
8883
      if not self._CheckDisksActivated(instance):
8884
        raise errors.OpPrereqError("Please run activate-disks on instance %s"
8885
                                   " first" % self.instance_name,
8886
                                   errors.ECODE_STATE)
8887
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
8888
      faulty_secondary = self._FindFaultyDisks(secondary_node)
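      # in auto mode only one side may have faulty disks: those disks are
      # replaced in place on the faulty node, using the healthy node as
      # the peer for the consistency checks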
8889

    
8890
      if faulty_primary and faulty_secondary:
8891
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
8892
                                   " one node and can not be repaired"
8893
                                   " automatically" % self.instance_name,
8894
                                   errors.ECODE_STATE)
8895

    
8896
      if faulty_primary:
8897
        self.disks = faulty_primary
8898
        self.target_node = instance.primary_node
8899
        self.other_node = secondary_node
8900
        check_nodes = [self.target_node, self.other_node]
8901
      elif faulty_secondary:
8902
        self.disks = faulty_secondary
8903
        self.target_node = secondary_node
8904
        self.other_node = instance.primary_node
8905
        check_nodes = [self.target_node, self.other_node]
8906
      else:
8907
        self.disks = []
8908
        check_nodes = []
8909

    
8910
    else:
8911
      # Non-automatic modes
8912
      if self.mode == constants.REPLACE_DISK_PRI:
8913
        self.target_node = instance.primary_node
8914
        self.other_node = secondary_node
8915
        check_nodes = [self.target_node, self.other_node]
8916

    
8917
      elif self.mode == constants.REPLACE_DISK_SEC:
8918
        self.target_node = secondary_node
8919
        self.other_node = instance.primary_node
8920
        check_nodes = [self.target_node, self.other_node]
8921

    
8922
      elif self.mode == constants.REPLACE_DISK_CHG:
8923
        self.new_node = remote_node
8924
        self.other_node = instance.primary_node
8925
        self.target_node = secondary_node
8926
        check_nodes = [self.new_node, self.other_node]
8927

    
8928
        _CheckNodeNotDrained(self.lu, remote_node)
8929
        _CheckNodeVmCapable(self.lu, remote_node)
8930

    
8931
        old_node_info = self.cfg.GetNodeInfo(secondary_node)
8932
        assert old_node_info is not None
8933
        if old_node_info.offline and not self.early_release:
8934
          # doesn't make sense to delay the release
8935
          self.early_release = True
8936
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
8937
                          " early-release mode", secondary_node)
8938

    
8939
      else:
8940
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
8941
                                     self.mode)
8942

    
8943
      # If not specified all disks should be replaced
8944
      if not self.disks:
8945
        self.disks = range(len(self.instance.disks))
8946

    
8947
    for node in check_nodes:
8948
      _CheckNodeOnline(self.lu, node)
8949

    
8950
    touched_nodes = frozenset(node_name for node_name in [self.new_node,
8951
                                                          self.other_node,
8952
                                                          self.target_node]
8953
                              if node_name is not None)
8954

    
8955
    # Release unneeded node locks
8956
    _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
8957

    
8958
    # Release any owned node group
8959
    if self.lu.glm.is_owned(locking.LEVEL_NODEGROUP):
8960
      _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
8961

    
8962
    # Check whether disks are valid
8963
    for disk_idx in self.disks:
8964
      instance.FindDisk(disk_idx)
8965

    
8966
    # Get secondary node IP addresses
8967
    self.node_secondary_ip = \
8968
      dict((node_name, self.cfg.GetNodeInfo(node_name).secondary_ip)
8969
           for node_name in touched_nodes)
8970

    
8971
  def Exec(self, feedback_fn):
8972
    """Execute disk replacement.
8973

8974
    This dispatches the disk replacement to the appropriate handler.
8975

8976
    """
8977
    if self.delay_iallocator:
8978
      self._CheckPrereq2()
8979

    
8980
    if __debug__:
8981
      # Verify owned locks before starting operation
8982
      owned_locks = self.lu.glm.list_owned(locking.LEVEL_NODE)
8983
      assert set(owned_locks) == set(self.node_secondary_ip), \
8984
          ("Incorrect node locks, owning %s, expected %s" %
8985
           (owned_locks, self.node_secondary_ip.keys()))
8986

    
8987
      owned_locks = self.lu.glm.list_owned(locking.LEVEL_INSTANCE)
8988
      assert list(owned_locks) == [self.instance_name], \
8989
          "Instance '%s' not locked" % self.instance_name
8990

    
8991
      assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
8992
          "Should not own any node group lock at this point"
8993

    
8994
    if not self.disks:
8995
      feedback_fn("No disks need replacement")
8996
      return
8997

    
8998
    feedback_fn("Replacing disk(s) %s for %s" %
8999
                (utils.CommaJoin(self.disks), self.instance.name))
9000

    
9001
    activate_disks = (not self.instance.admin_up)
9002

    
9003
    # Activate the instance disks if we're replacing them on a down instance
9004
    if activate_disks:
9005
      _StartInstanceDisks(self.lu, self.instance, True)
9006

    
9007
    try:
9008
      # Should we replace the secondary node?
9009
      if self.new_node is not None:
9010
        fn = self._ExecDrbd8Secondary
9011
      else:
9012
        fn = self._ExecDrbd8DiskOnly
9013

    
9014
      result = fn(feedback_fn)
9015
    finally:
9016
      # Deactivate the instance disks if we're replacing them on a
9017
      # down instance
9018
      if activate_disks:
9019
        _SafeShutdownInstanceDisks(self.lu, self.instance)
9020

    
9021
    if __debug__:
9022
      # Verify owned locks
9023
      owned_locks = self.lu.glm.list_owned(locking.LEVEL_NODE)
9024
      nodes = frozenset(self.node_secondary_ip)
9025
      assert ((self.early_release and not owned_locks) or
9026
              (not self.early_release and not (set(owned_locks) - nodes))), \
9027
        ("Not owning the correct locks, early_release=%s, owned=%r,"
9028
         " nodes=%r" % (self.early_release, owned_locks, nodes))
9029

    
9030
    return result
9031

    
9032
  def _CheckVolumeGroup(self, nodes):
9033
    self.lu.LogInfo("Checking volume groups")
9034

    
9035
    vgname = self.cfg.GetVGName()
9036

    
9037
    # Make sure volume group exists on all involved nodes
9038
    results = self.rpc.call_vg_list(nodes)
9039
    if not results:
9040
      raise errors.OpExecError("Can't list volume groups on the nodes")
9041

    
9042
    for node in nodes:
9043
      res = results[node]
9044
      res.Raise("Error checking node %s" % node)
9045
      if vgname not in res.payload:
9046
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
9047
                                 (vgname, node))
9048

    
9049
  def _CheckDisksExistence(self, nodes):
9050
    # Check disk existence
9051
    for idx, dev in enumerate(self.instance.disks):
9052
      if idx not in self.disks:
9053
        continue
9054

    
9055
      for node in nodes:
9056
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
9057
        self.cfg.SetDiskID(dev, node)
9058

    
9059
        result = self.rpc.call_blockdev_find(node, dev)
9060

    
9061
        msg = result.fail_msg
9062
        if msg or not result.payload:
9063
          if not msg:
9064
            msg = "disk not found"
9065
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
9066
                                   (idx, node, msg))
9067

    
9068
  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
9069
    for idx, dev in enumerate(self.instance.disks):
9070
      if idx not in self.disks:
9071
        continue
9072

    
9073
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
9074
                      (idx, node_name))
9075

    
9076
      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
9077
                                   ldisk=ldisk):
9078
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
9079
                                 " replace disks for instance %s" %
9080
                                 (node_name, self.instance.name))
9081

    
9082
  def _CreateNewStorage(self, node_name):
9083
    iv_names = {}
9084

    
9085
    for idx, dev in enumerate(self.instance.disks):
9086
      if idx not in self.disks:
9087
        continue
9088

    
9089
      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
9090

    
9091
      self.cfg.SetDiskID(dev, node_name)
9092

    
9093
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
9094
      names = _GenerateUniqueNames(self.lu, lv_names)
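      # the replacement LVs are created in the same volume groups as the
      # existing children: the data LV matches the DRBD device size, the
      # metadata LV gets a fixed size of 128 (MiB)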
9095

    
9096
      vg_data = dev.children[0].logical_id[0]
9097
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
9098
                             logical_id=(vg_data, names[0]))
9099
      vg_meta = dev.children[1].logical_id[0]
9100
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
9101
                             logical_id=(vg_meta, names[1]))
9102

    
9103
      new_lvs = [lv_data, lv_meta]
9104
      old_lvs = dev.children
9105
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
9106

    
9107
      # we pass force_create=True to force the LVM creation
9108
      for new_lv in new_lvs:
9109
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
9110
                        _GetInstanceInfoText(self.instance), False)
9111

    
9112
    return iv_names
9113

    
9114
  def _CheckDevices(self, node_name, iv_names):
9115
    for name, (dev, _, _) in iv_names.iteritems():
9116
      self.cfg.SetDiskID(dev, node_name)
9117

    
9118
      result = self.rpc.call_blockdev_find(node_name, dev)
9119

    
9120
      msg = result.fail_msg
9121
      if msg or not result.payload:
9122
        if not msg:
9123
          msg = "disk not found"
9124
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
9125
                                 (name, msg))
9126

    
9127
      if result.payload.is_degraded:
9128
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
9129

    
9130
  def _RemoveOldStorage(self, node_name, iv_names):
9131
    for name, (_, old_lvs, _) in iv_names.iteritems():
9132
      self.lu.LogInfo("Remove logical volumes for %s" % name)
9133

    
9134
      for lv in old_lvs:
9135
        self.cfg.SetDiskID(lv, node_name)
9136

    
9137
        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
9138
        if msg:
9139
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
9140
                             hint="remove unused LVs manually")
9141

    
9142
  def _ExecDrbd8DiskOnly(self, feedback_fn):
9143
    """Replace a disk on the primary or secondary for DRBD 8.
9144

9145
    The algorithm for replace is quite complicated:
9146

9147
      1. for each disk to be replaced:
9148

9149
        1. create new LVs on the target node with unique names
9150
        1. detach old LVs from the drbd device
9151
        1. rename old LVs to name_replaced.<time_t>
9152
        1. rename new LVs to old LVs
9153
        1. attach the new LVs (with the old names now) to the drbd device
9154

9155
      1. wait for sync across all devices
9156

9157
      1. for each modified disk:
9158

9159
        1. remove old LVs (which have the name name_replaced.<time_t>)
9160

9161
    Failures are not very well handled.
9162

9163
    """
9164
    steps_total = 6
9165

    
9166
    # Step: check device activation
9167
    self.lu.LogStep(1, steps_total, "Check device existence")
9168
    self._CheckDisksExistence([self.other_node, self.target_node])
9169
    self._CheckVolumeGroup([self.target_node, self.other_node])
9170

    
9171
    # Step: check other node consistency
9172
    self.lu.LogStep(2, steps_total, "Check peer consistency")
9173
    self._CheckDisksConsistency(self.other_node,
9174
                                self.other_node == self.instance.primary_node,
9175
                                False)
9176

    
9177
    # Step: create new storage
9178
    self.lu.LogStep(3, steps_total, "Allocate new storage")
9179
    iv_names = self._CreateNewStorage(self.target_node)
9180

    
9181
    # Step: for each lv, detach+rename*2+attach
9182
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
9183
    for dev, old_lvs, new_lvs in iv_names.itervalues():
9184
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
9185

    
9186
      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
9187
                                                     old_lvs)
9188
      result.Raise("Can't detach drbd from local storage on node"
9189
                   " %s for device %s" % (self.target_node, dev.iv_name))
9190
      #dev.children = []
9191
      #cfg.Update(instance)
9192

    
9193
      # ok, we created the new LVs, so now we know we have the needed
9194
      # storage; as such, we proceed on the target node to rename
9195
      # old_lv to old_lv_replaced-<time_t> and new_lv to old_lv; note
9196
      # that we rename LVs using the assumption that logical_id ==
9197
      # physical_id (which in turn is the unique_id on that node)
9198

    
9199
      # FIXME(iustin): use a better name for the replaced LVs
9200
      temp_suffix = int(time.time())
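      # ren_fn keeps the VG and appends "_replaced-<timestamp>" to the LV
      # name; this is the temporary name the old LVs are renamed to below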
9201
      ren_fn = lambda d, suff: (d.physical_id[0],
9202
                                d.physical_id[1] + "_replaced-%s" % suff)
9203

    
9204
      # Build the rename list based on what LVs exist on the node
9205
      rename_old_to_new = []
9206
      for to_ren in old_lvs:
9207
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
9208
        if not result.fail_msg and result.payload:
9209
          # device exists
9210
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
9211

    
9212
      self.lu.LogInfo("Renaming the old LVs on the target node")
9213
      result = self.rpc.call_blockdev_rename(self.target_node,
9214
                                             rename_old_to_new)
9215
      result.Raise("Can't rename old LVs on node %s" % self.target_node)
9216

    
9217
      # Now we rename the new LVs to the old LVs
9218
      self.lu.LogInfo("Renaming the new LVs on the target node")
9219
      rename_new_to_old = [(new, old.physical_id)
9220
                           for old, new in zip(old_lvs, new_lvs)]
9221
      result = self.rpc.call_blockdev_rename(self.target_node,
9222
                                             rename_new_to_old)
9223
      result.Raise("Can't rename new LVs on node %s" % self.target_node)
9224

    
9225
      for old, new in zip(old_lvs, new_lvs):
9226
        new.logical_id = old.logical_id
9227
        self.cfg.SetDiskID(new, self.target_node)
9228

    
9229
      for disk in old_lvs:
9230
        disk.logical_id = ren_fn(disk, temp_suffix)
9231
        self.cfg.SetDiskID(disk, self.target_node)
9232

    
9233
      # Now that the new lvs have the old name, we can add them to the device
9234
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
9235
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
9236
                                                  new_lvs)
9237
      msg = result.fail_msg
9238
      if msg:
9239
        for new_lv in new_lvs:
9240
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
9241
                                               new_lv).fail_msg
9242
          if msg2:
9243
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
9244
                               hint=("cleanup manually the unused logical"
9245
                                     "volumes"))
9246
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
9247

    
9248
      dev.children = new_lvs
9249

    
9250
      self.cfg.Update(self.instance, feedback_fn)
9251

    
9252
    cstep = 5
9253
    if self.early_release:
9254
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
9255
      cstep += 1
9256
      self._RemoveOldStorage(self.target_node, iv_names)
9257
      # WARNING: we release both node locks here, do not do other RPCs
9258
      # than WaitForSync to the primary node
9259
      _ReleaseLocks(self.lu, locking.LEVEL_NODE,
9260
                    names=[self.target_node, self.other_node])
9261

    
9262
    # Wait for sync
9263
    # This can fail as the old devices are degraded and _WaitForSync
9264
    # does a combined result over all disks, so we don't check its return value
9265
    self.lu.LogStep(cstep, steps_total, "Sync devices")
9266
    cstep += 1
9267
    _WaitForSync(self.lu, self.instance)
9268

    
9269
    # Check all devices manually
9270
    self._CheckDevices(self.instance.primary_node, iv_names)
9271

    
9272
    # Step: remove old storage
9273
    if not self.early_release:
9274
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
9275
      cstep += 1
9276
      self._RemoveOldStorage(self.target_node, iv_names)
9277

    
9278
  def _ExecDrbd8Secondary(self, feedback_fn):
9279
    """Replace the secondary node for DRBD 8.
9280

9281
    The algorithm for replace is quite complicated:
9282
      - for all disks of the instance:
9283
        - create new LVs on the new node with same names
9284
        - shutdown the drbd device on the old secondary
9285
        - disconnect the drbd network on the primary
9286
        - create the drbd device on the new secondary
9287
        - network attach the drbd on the primary, using an artifice:
9288
          the drbd code for Attach() will connect to the network if it
9289
          finds a device which is connected to the good local disks but
9290
          not network enabled
9291
      - wait for sync across all devices
9292
      - remove all disks from the old secondary
9293

9294
    Failures are not very well handled.
9295

9296
    """
9297
    steps_total = 6
9298

    
9299
    # Step: check device activation
9300
    self.lu.LogStep(1, steps_total, "Check device existence")
9301
    self._CheckDisksExistence([self.instance.primary_node])
9302
    self._CheckVolumeGroup([self.instance.primary_node])
9303

    
9304
    # Step: check other node consistency
9305
    self.lu.LogStep(2, steps_total, "Check peer consistency")
9306
    self._CheckDisksConsistency(self.instance.primary_node, True, True)
9307

    
9308
    # Step: create new storage
9309
    self.lu.LogStep(3, steps_total, "Allocate new storage")
9310
    for idx, dev in enumerate(self.instance.disks):
9311
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
9312
                      (self.new_node, idx))
9313
      # we pass force_create=True to force LVM creation
9314
      for new_lv in dev.children:
9315
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
9316
                        _GetInstanceInfoText(self.instance), False)
9317

    
9318
    # Step 4: drbd minors and drbd setup changes
9319
    # after this, we must manually remove the drbd minors on both the
9320
    # error and the success paths
9321
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
9322
    minors = self.cfg.AllocateDRBDMinor([self.new_node
9323
                                         for dev in self.instance.disks],
9324
                                        self.instance.name)
9325
    logging.debug("Allocated minors %r", minors)
9326

    
9327
    iv_names = {}
9328
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
9329
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
9330
                      (self.new_node, idx))
9331
      # create new devices on new_node; note that we create two IDs:
9332
      # one without port, so the drbd will be activated without
9333
      # networking information on the new node at this stage, and one
9334
      # with network, for the later (re-)activation of the network
9335
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
9336
      if self.instance.primary_node == o_node1:
9337
        p_minor = o_minor1
9338
      else:
9339
        assert self.instance.primary_node == o_node2, "Three-node instance?"
9340
        p_minor = o_minor2
9341

    
9342
      new_alone_id = (self.instance.primary_node, self.new_node, None,
9343
                      p_minor, new_minor, o_secret)
9344
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
9345
                    p_minor, new_minor, o_secret)
9346

    
9347
      iv_names[idx] = (dev, dev.children, new_net_id)
9348
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
9349
                    new_net_id)
9350
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
9351
                              logical_id=new_alone_id,
9352
                              children=dev.children,
9353
                              size=dev.size)
9354
      try:
9355
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
9356
                              _GetInstanceInfoText(self.instance), False)
9357
      except errors.GenericError:
9358
        self.cfg.ReleaseDRBDMinors(self.instance.name)
9359
        raise
9360

    
9361
    # We have new devices, shutdown the drbd on the old secondary
9362
    for idx, dev in enumerate(self.instance.disks):
9363
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
9364
      self.cfg.SetDiskID(dev, self.target_node)
9365
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
9366
      if msg:
9367
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
9368
                           "node: %s" % (idx, msg),
9369
                           hint=("Please cleanup this device manually as"
9370
                                 " soon as possible"))
9371

    
9372
    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
9373
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
9374
                                               self.node_secondary_ip,
9375
                                               self.instance.disks)\
9376
                                              [self.instance.primary_node]
9377

    
9378
    msg = result.fail_msg
9379
    if msg:
9380
      # detaches didn't succeed (unlikely)
9381
      self.cfg.ReleaseDRBDMinors(self.instance.name)
9382
      raise errors.OpExecError("Can't detach the disks from the network on"
9383
                               " old node: %s" % (msg,))
9384

    
9385
    # if we managed to detach at least one, we update all the disks of
9386
    # the instance to point to the new secondary
9387
    self.lu.LogInfo("Updating instance configuration")
9388
    for dev, _, new_logical_id in iv_names.itervalues():
9389
      dev.logical_id = new_logical_id
9390
      self.cfg.SetDiskID(dev, self.instance.primary_node)
9391

    
9392
    self.cfg.Update(self.instance, feedback_fn)
9393

    
9394
    # and now perform the drbd attach
9395
    self.lu.LogInfo("Attaching primary drbds to new secondary"
9396
                    " (standalone => connected)")
9397
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
9398
                                            self.new_node],
9399
                                           self.node_secondary_ip,
9400
                                           self.instance.disks,
9401
                                           self.instance.name,
9402
                                           False)
9403
    for to_node, to_result in result.items():
9404
      msg = to_result.fail_msg
9405
      if msg:
9406
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
9407
                           to_node, msg,
9408
                           hint=("please do a gnt-instance info to see the"
9409
                                 " status of disks"))
9410
    cstep = 5
9411
    if self.early_release:
9412
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
9413
      cstep += 1
9414
      self._RemoveOldStorage(self.target_node, iv_names)
9415
      # WARNING: we release all node locks here, do not do other RPCs
9416
      # than WaitForSync to the primary node
9417
      _ReleaseLocks(self.lu, locking.LEVEL_NODE,
9418
                    names=[self.instance.primary_node,
9419
                           self.target_node,
9420
                           self.new_node])
9421

    
9422
    # Wait for sync
9423
    # This can fail as the old devices are degraded and _WaitForSync
9424
    # does a combined result over all disks, so we don't check its return value
9425
    self.lu.LogStep(cstep, steps_total, "Sync devices")
9426
    cstep += 1
9427
    _WaitForSync(self.lu, self.instance)
9428

    
9429
    # Check all devices manually
9430
    self._CheckDevices(self.instance.primary_node, iv_names)
9431

    
9432
    # Step: remove old storage
9433
    if not self.early_release:
9434
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
9435
      self._RemoveOldStorage(self.target_node, iv_names)
9436

    
9437

    
9438
class LURepairNodeStorage(NoHooksLU):
9439
  """Repairs the volume group on a node.
9440

9441
  """
9442
  REQ_BGL = False
9443

    
9444
  def CheckArguments(self):
9445
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
9446

    
9447
    storage_type = self.op.storage_type
9448

    
9449
    if (constants.SO_FIX_CONSISTENCY not in
9450
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
9451
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
9452
                                 " repaired" % storage_type,
9453
                                 errors.ECODE_INVAL)
9454

    
9455
  def ExpandNames(self):
9456
    self.needed_locks = {
9457
      locking.LEVEL_NODE: [self.op.node_name],
9458
      }
9459

    
9460
  def _CheckFaultyDisks(self, instance, node_name):
9461
    """Ensure faulty disks abort the opcode or at least warn."""
9462
    try:
9463
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
9464
                                  node_name, True):
9465
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
9466
                                   " node '%s'" % (instance.name, node_name),
9467
                                   errors.ECODE_STATE)
9468
    except errors.OpPrereqError, err:
9469
      if self.op.ignore_consistency:
9470
        self.proc.LogWarning(str(err.args[0]))
9471
      else:
9472
        raise
9473

    
9474
  def CheckPrereq(self):
9475
    """Check prerequisites.
9476

9477
    """
9478
    # Check whether any instance on this node has faulty disks
9479
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
9480
      if not inst.admin_up:
9481
        continue
9482
      check_nodes = set(inst.all_nodes)
9483
      check_nodes.discard(self.op.node_name)
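      # note: the node being repaired itself is skipped here; what is
      # verified is that the instance's *other* nodes still have healthy
      # disks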
9484
      for inst_node_name in check_nodes:
9485
        self._CheckFaultyDisks(inst, inst_node_name)
9486

    
9487
  def Exec(self, feedback_fn):
9488
    feedback_fn("Repairing storage unit '%s' on %s ..." %
9489
                (self.op.name, self.op.node_name))
9490

    
9491
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
9492
    result = self.rpc.call_storage_execute(self.op.node_name,
9493
                                           self.op.storage_type, st_args,
9494
                                           self.op.name,
9495
                                           constants.SO_FIX_CONSISTENCY)
9496
    result.Raise("Failed to repair storage unit '%s' on %s" %
9497
                 (self.op.name, self.op.node_name))
9498

    
9499

    
9500
class LUNodeEvacStrategy(NoHooksLU):
9501
  """Computes the node evacuation strategy.
9502

9503
  """
9504
  REQ_BGL = False
9505

    
9506
  def CheckArguments(self):
9507
    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
9508

    
9509
  def ExpandNames(self):
9510
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
9511
    self.needed_locks = locks = {}
9512
    if self.op.remote_node is None:
9513
      locks[locking.LEVEL_NODE] = locking.ALL_SET
9514
    else:
9515
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
9516
      locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]
9517

    
9518
  def Exec(self, feedback_fn):
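    # two strategies: with an explicit remote node every secondary
    # instance of the evacuated nodes is simply re-homed onto that node;
    # otherwise an iallocator run (MEVAC mode) computes the placement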
9519
    if self.op.remote_node is not None:
9520
      instances = []
9521
      for node in self.op.nodes:
9522
        instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
9523
      result = []
9524
      for i in instances:
9525
        if i.primary_node == self.op.remote_node:
9526
          raise errors.OpPrereqError("Node %s is the primary node of"
9527
                                     " instance %s, cannot use it as"
9528
                                     " secondary" %
9529
                                     (self.op.remote_node, i.name),
9530
                                     errors.ECODE_INVAL)
9531
        result.append([i.name, self.op.remote_node])
9532
    else:
9533
      ial = IAllocator(self.cfg, self.rpc,
9534
                       mode=constants.IALLOCATOR_MODE_MEVAC,
9535
                       evac_nodes=self.op.nodes)
9536
      ial.Run(self.op.iallocator, validate=True)
9537
      if not ial.success:
9538
        raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
9539
                                 errors.ECODE_NORES)
9540
      result = ial.result
9541
    return result
9542

    
9543

    
9544
class LUInstanceGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)

    self.instance = instance

    if instance.disk_template not in constants.DTS_GROWABLE:
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing", errors.ECODE_INVAL)

    self.disk = instance.FindDisk(self.op.disk)

    if instance.disk_template not in (constants.DT_FILE,
                                      constants.DT_SHARED_FILE):
      # TODO: check the free disk space for file, when that feature will be
      # supported
      _CheckNodesFreeDiskPerVG(self, nodenames,
                               self.disk.ComputeGrowth(self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = self.disk

    disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block device to grow")

    # First run all grow ops in dry-run mode
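    # so that a problem on any node (for example insufficient free space) is
    # detected before any node has actually resized the device; only then is
    # the grow executed for real on every node.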
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount, True)
      result.Raise("Grow request failed to node %s" % node)

    # We know that (as far as we can test) operations across different
    # nodes will succeed, time to run it for real
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount, False)
      result.Raise("Grow request failed to node %s" % node)

      # TODO: Rewrite code to work properly
      # DRBD goes into sync mode for a short amount of time after executing the
      # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
      # calling "resize" in sync mode fails. Sleeping for a short amount of
      # time is a work-around.
      time.sleep(5)

    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance, feedback_fn)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance, disks=[disk])
      if disk_abort:
        self.proc.LogWarning("Disk sync-ing has not returned a good"
                             " status; please check the instance")
      if not instance.admin_up:
        _SafeShutdownInstanceDisks(self, instance, disks=[disk])
    elif not instance.admin_up:
      self.proc.LogWarning("Not shutting down the disk even if the instance is"
                           " not supposed to be running because no wait for"
                           " sync mode was requested")


class LUInstanceQueryData(NoHooksLU):
9656
  """Query runtime instance data.
9657

9658
  """
9659
  REQ_BGL = False
9660

    
9661
  def ExpandNames(self):
9662
    self.needed_locks = {}
9663

    
9664
    # Use locking if requested or when non-static information is wanted
9665
    if not (self.op.static or self.op.use_locking):
9666
      self.LogWarning("Non-static data requested, locks need to be acquired")
9667
      self.op.use_locking = True
9668

    
9669
    if self.op.instances or not self.op.use_locking:
9670
      # Expand instance names right here
9671
      self.wanted_names = _GetWantedInstances(self, self.op.instances)
9672
    else:
9673
      # Will use acquired locks
9674
      self.wanted_names = None
9675

    
9676
    if self.op.use_locking:
9677
      self.share_locks = dict.fromkeys(locking.LEVELS, 1)
9678

    
9679
      if self.wanted_names is None:
9680
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
9681
      else:
9682
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
9683

    
9684
      self.needed_locks[locking.LEVEL_NODE] = []
9685
      self.share_locks = dict.fromkeys(locking.LEVELS, 1)
9686
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
9687

    
9688
  def DeclareLocks(self, level):
9689
    if self.op.use_locking and level == locking.LEVEL_NODE:
9690
      self._LockInstancesNodes()
9691

    
9692
  def CheckPrereq(self):
9693
    """Check prerequisites.
9694

9695
    This only checks the optional instance list against the existing names.
9696

9697
    """
9698
    if self.wanted_names is None:
9699
      assert self.op.use_locking, "Locking was not used"
9700
      self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)
9701

    
9702
    self.wanted_instances = [self.cfg.GetInstanceInfo(name)
9703
                             for name in self.wanted_names]
9704

    
9705
  def _ComputeBlockdevStatus(self, node, instance_name, dev):
9706
    """Returns the status of a block device
9707

9708
    """
9709
    if self.op.static or not node:
9710
      return None
9711

    
9712
    self.cfg.SetDiskID(dev, node)
9713

    
9714
    result = self.rpc.call_blockdev_find(node, dev)
9715
    if result.offline:
9716
      return None
9717

    
9718
    result.Raise("Can't compute disk status for %s" % instance_name)
9719

    
9720
    status = result.payload
9721
    if status is None:
9722
      return None
9723

    
9724
    return (status.dev_path, status.major, status.minor,
9725
            status.sync_percent, status.estimated_time,
9726
            status.is_degraded, status.ldisk_status)
9727

    
9728
  def _ComputeDiskStatus(self, instance, snode, dev):
9729
    """Compute block device status.
9730

9731
    """
9732
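    # For DRBD devices logical_id holds the two node names; report the one
    # that is not the primary as the secondary for status purposes.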
    if dev.dev_type in constants.LDS_DRBD:
9733
      # we change the snode then (otherwise we use the one passed in)
9734
      if dev.logical_id[0] == instance.primary_node:
9735
        snode = dev.logical_id[1]
9736
      else:
9737
        snode = dev.logical_id[0]
9738

    
9739
    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
9740
                                              instance.name, dev)
9741
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
9742

    
9743
    if dev.children:
9744
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
9745
                      for child in dev.children]
9746
    else:
9747
      dev_children = []
9748

    
9749
    return {
9750
      "iv_name": dev.iv_name,
9751
      "dev_type": dev.dev_type,
9752
      "logical_id": dev.logical_id,
9753
      "physical_id": dev.physical_id,
9754
      "pstatus": dev_pstatus,
9755
      "sstatus": dev_sstatus,
9756
      "children": dev_children,
9757
      "mode": dev.mode,
9758
      "size": dev.size,
9759
      }
9760

    
9761
  def Exec(self, feedback_fn):
9762
    """Gather and return data"""
9763
    result = {}
9764

    
9765
    cluster = self.cfg.GetClusterInfo()
9766

    
9767
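    # For every requested instance combine the configured state with the live
    # state reported by its primary node (skipped when only static data was
    # asked for).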
    for instance in self.wanted_instances:
9768
      if not self.op.static:
9769
        remote_info = self.rpc.call_instance_info(instance.primary_node,
9770
                                                  instance.name,
9771
                                                  instance.hypervisor)
9772
        remote_info.Raise("Error checking node %s" % instance.primary_node)
9773
        remote_info = remote_info.payload
9774
        if remote_info and "state" in remote_info:
9775
          remote_state = "up"
9776
        else:
9777
          remote_state = "down"
9778
      else:
9779
        remote_state = None
9780
      if instance.admin_up:
9781
        config_state = "up"
9782
      else:
9783
        config_state = "down"
9784

    
9785
      disks = [self._ComputeDiskStatus(instance, None, device)
9786
               for device in instance.disks]
9787

    
9788
      result[instance.name] = {
9789
        "name": instance.name,
9790
        "config_state": config_state,
9791
        "run_state": remote_state,
9792
        "pnode": instance.primary_node,
9793
        "snodes": instance.secondary_nodes,
9794
        "os": instance.os,
9795
        # this happens to be the same format used for hooks
9796
        "nics": _NICListToTuple(self, instance.nics),
9797
        "disk_template": instance.disk_template,
9798
        "disks": disks,
9799
        "hypervisor": instance.hypervisor,
9800
        "network_port": instance.network_port,
9801
        "hv_instance": instance.hvparams,
9802
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
9803
        "be_instance": instance.beparams,
9804
        "be_actual": cluster.FillBE(instance),
9805
        "os_instance": instance.osparams,
9806
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
9807
        "serial_no": instance.serial_no,
9808
        "mtime": instance.mtime,
9809
        "ctime": instance.ctime,
9810
        "uuid": instance.uuid,
9811
        }
9812

    
9813
    return result
9814

    
9815

    
9816
class LUInstanceSetParams(LogicalUnit):
9817
  """Modifies an instances's parameters.
9818

9819
  """
9820
  HPATH = "instance-modify"
9821
  HTYPE = constants.HTYPE_INSTANCE
9822
  REQ_BGL = False
9823

    
9824
  def CheckArguments(self):
9825
    if not (self.op.nics or self.op.disks or self.op.disk_template or
9826
            self.op.hvparams or self.op.beparams or self.op.os_name):
9827
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
9828

    
9829
    if self.op.hvparams:
9830
      _CheckGlobalHvParams(self.op.hvparams)
9831

    
9832
    # Disk validation
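    # self.op.disks is a list of (operation, parameters) pairs; the operation
    # is either constants.DDM_ADD / constants.DDM_REMOVE or the index of an
    # existing disk, e.g. (illustrative values only):
    #   [(constants.DDM_ADD, {constants.IDISK_SIZE: 1024})]
    #   [(0, {constants.IDISK_MODE: constants.DISK_RDWR})]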
9833
    disk_addremove = 0
9834
    for disk_op, disk_dict in self.op.disks:
9835
      utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
9836
      if disk_op == constants.DDM_REMOVE:
9837
        disk_addremove += 1
9838
        continue
9839
      elif disk_op == constants.DDM_ADD:
9840
        disk_addremove += 1
9841
      else:
9842
        if not isinstance(disk_op, int):
9843
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
9844
        if not isinstance(disk_dict, dict):
9845
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
9846
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
9847

    
9848
      if disk_op == constants.DDM_ADD:
9849
        mode = disk_dict.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
9850
        if mode not in constants.DISK_ACCESS_SET:
9851
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
9852
                                     errors.ECODE_INVAL)
9853
        size = disk_dict.get(constants.IDISK_SIZE, None)
9854
        if size is None:
9855
          raise errors.OpPrereqError("Required disk parameter size missing",
9856
                                     errors.ECODE_INVAL)
9857
        try:
9858
          size = int(size)
9859
        except (TypeError, ValueError), err:
9860
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
9861
                                     str(err), errors.ECODE_INVAL)
9862
        disk_dict[constants.IDISK_SIZE] = size
9863
      else:
9864
        # modification of disk
9865
        if constants.IDISK_SIZE in disk_dict:
9866
          raise errors.OpPrereqError("Disk size change not possible, use"
9867
                                     " grow-disk", errors.ECODE_INVAL)
9868

    
9869
    if disk_addremove > 1:
9870
      raise errors.OpPrereqError("Only one disk add or remove operation"
9871
                                 " supported at a time", errors.ECODE_INVAL)
9872

    
9873
    if self.op.disks and self.op.disk_template is not None:
9874
      raise errors.OpPrereqError("Disk template conversion and other disk"
9875
                                 " changes not supported at the same time",
9876
                                 errors.ECODE_INVAL)
9877

    
9878
    if (self.op.disk_template and
9879
        self.op.disk_template in constants.DTS_INT_MIRROR and
9880
        self.op.remote_node is None):
9881
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
9882
                                 " one requires specifying a secondary node",
9883
                                 errors.ECODE_INVAL)
9884

    
9885
    # NIC validation
9886
    nic_addremove = 0
9887
    for nic_op, nic_dict in self.op.nics:
9888
      utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
9889
      if nic_op == constants.DDM_REMOVE:
9890
        nic_addremove += 1
9891
        continue
9892
      elif nic_op == constants.DDM_ADD:
9893
        nic_addremove += 1
9894
      else:
9895
        if not isinstance(nic_op, int):
9896
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
9897
        if not isinstance(nic_dict, dict):
9898
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
9899
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
9900

    
9901
      # nic_dict should be a dict
9902
      nic_ip = nic_dict.get(constants.INIC_IP, None)
9903
      if nic_ip is not None:
9904
        if nic_ip.lower() == constants.VALUE_NONE:
9905
          nic_dict[constants.INIC_IP] = None
9906
        else:
9907
          if not netutils.IPAddress.IsValid(nic_ip):
9908
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
9909
                                       errors.ECODE_INVAL)
9910

    
9911
      nic_bridge = nic_dict.get('bridge', None)
9912
      nic_link = nic_dict.get(constants.INIC_LINK, None)
9913
      if nic_bridge and nic_link:
9914
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
9915
                                   " at the same time", errors.ECODE_INVAL)
9916
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
9917
        nic_dict['bridge'] = None
9918
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
9919
        nic_dict[constants.INIC_LINK] = None
9920

    
9921
      if nic_op == constants.DDM_ADD:
9922
        nic_mac = nic_dict.get(constants.INIC_MAC, None)
9923
        if nic_mac is None:
9924
          nic_dict[constants.INIC_MAC] = constants.VALUE_AUTO
9925

    
9926
      if constants.INIC_MAC in nic_dict:
9927
        nic_mac = nic_dict[constants.INIC_MAC]
9928
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9929
          nic_mac = utils.NormalizeAndValidateMac(nic_mac)
9930

    
9931
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
9932
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
9933
                                     " modifying an existing nic",
9934
                                     errors.ECODE_INVAL)
9935

    
9936
    if nic_addremove > 1:
9937
      raise errors.OpPrereqError("Only one NIC add or remove operation"
9938
                                 " supported at a time", errors.ECODE_INVAL)
9939

    
9940
  def ExpandNames(self):
9941
    self._ExpandAndLockInstance()
9942
    self.needed_locks[locking.LEVEL_NODE] = []
9943
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
9944

    
9945
  def DeclareLocks(self, level):
9946
    if level == locking.LEVEL_NODE:
9947
      self._LockInstancesNodes()
9948
      if self.op.disk_template and self.op.remote_node:
9949
        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
9950
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
9951

    
9952
  def BuildHooksEnv(self):
9953
    """Build hooks env.
9954

9955
    This runs on the master, primary and secondaries.
9956

9957
    """
9958
    args = dict()
9959
    if constants.BE_MEMORY in self.be_new:
9960
      args['memory'] = self.be_new[constants.BE_MEMORY]
9961
    if constants.BE_VCPUS in self.be_new:
9962
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
9963
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
9964
    # information at all.
9965
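    # Rebuild the NIC list as it will look after the requested changes, so the
    # hooks environment describes the new configuration rather than the old one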
    if self.op.nics:
9966
      args['nics'] = []
9967
      nic_override = dict(self.op.nics)
9968
      for idx, nic in enumerate(self.instance.nics):
9969
        if idx in nic_override:
9970
          this_nic_override = nic_override[idx]
9971
        else:
9972
          this_nic_override = {}
9973
        if constants.INIC_IP in this_nic_override:
9974
          ip = this_nic_override[constants.INIC_IP]
9975
        else:
9976
          ip = nic.ip
9977
        if constants.INIC_MAC in this_nic_override:
9978
          mac = this_nic_override[constants.INIC_MAC]
9979
        else:
9980
          mac = nic.mac
9981
        if idx in self.nic_pnew:
9982
          nicparams = self.nic_pnew[idx]
9983
        else:
9984
          nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
9985
        mode = nicparams[constants.NIC_MODE]
9986
        link = nicparams[constants.NIC_LINK]
9987
        args['nics'].append((ip, mac, mode, link))
9988
      if constants.DDM_ADD in nic_override:
9989
        ip = nic_override[constants.DDM_ADD].get(constants.INIC_IP, None)
9990
        mac = nic_override[constants.DDM_ADD][constants.INIC_MAC]
9991
        nicparams = self.nic_pnew[constants.DDM_ADD]
9992
        mode = nicparams[constants.NIC_MODE]
9993
        link = nicparams[constants.NIC_LINK]
9994
        args['nics'].append((ip, mac, mode, link))
9995
      elif constants.DDM_REMOVE in nic_override:
9996
        del args['nics'][-1]
9997

    
9998
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
9999
    if self.op.disk_template:
10000
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
10001

    
10002
    return env
10003

    
10004
  def BuildHooksNodes(self):
10005
    """Build hooks nodes.
10006

10007
    """
10008
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
10009
    return (nl, nl)
10010

    
10011
  def CheckPrereq(self):
10012
    """Check prerequisites.
10013

10014
    This only checks the instance list against the existing names.
10015

10016
    """
10017
    # checking the new params on the primary/secondary nodes
10018

    
10019
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
10020
    cluster = self.cluster = self.cfg.GetClusterInfo()
10021
    assert self.instance is not None, \
10022
      "Cannot retrieve locked instance %s" % self.op.instance_name
10023
    pnode = instance.primary_node
10024
    nodelist = list(instance.all_nodes)
10025

    
10026
    # OS change
10027
    if self.op.os_name and not self.op.force:
10028
      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
10029
                      self.op.force_variant)
10030
      instance_os = self.op.os_name
10031
    else:
10032
      instance_os = instance.os
10033

    
10034
    if self.op.disk_template:
10035
      if instance.disk_template == self.op.disk_template:
10036
        raise errors.OpPrereqError("Instance already has disk template %s" %
10037
                                   instance.disk_template, errors.ECODE_INVAL)
10038

    
10039
      if (instance.disk_template,
10040
          self.op.disk_template) not in self._DISK_CONVERSIONS:
10041
        raise errors.OpPrereqError("Unsupported disk template conversion from"
10042
                                   " %s to %s" % (instance.disk_template,
10043
                                                  self.op.disk_template),
10044
                                   errors.ECODE_INVAL)
10045
      _CheckInstanceDown(self, instance, "cannot change disk template")
10046
      if self.op.disk_template in constants.DTS_INT_MIRROR:
10047
        if self.op.remote_node == pnode:
10048
          raise errors.OpPrereqError("Given new secondary node %s is the same"
10049
                                     " as the primary node of the instance" %
10050
                                     self.op.remote_node, errors.ECODE_STATE)
10051
        _CheckNodeOnline(self, self.op.remote_node)
10052
        _CheckNodeNotDrained(self, self.op.remote_node)
10053
        # FIXME: here we assume that the old instance type is DT_PLAIN
10054
        assert instance.disk_template == constants.DT_PLAIN
10055
        disks = [{constants.IDISK_SIZE: d.size,
10056
                  constants.IDISK_VG: d.logical_id[0]}
10057
                 for d in instance.disks]
10058
        required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
10059
        _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
10060

    
10061
    # hvparams processing
10062
    if self.op.hvparams:
10063
      hv_type = instance.hypervisor
10064
      i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
10065
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
10066
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
10067

    
10068
      # local check
10069
      hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
10070
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
10071
      self.hv_new = hv_new # the new actual values
10072
      self.hv_inst = i_hvdict # the new dict (without defaults)
10073
    else:
10074
      self.hv_new = self.hv_inst = {}
10075

    
10076
    # beparams processing
10077
    if self.op.beparams:
10078
      i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
10079
                                   use_none=True)
10080
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
10081
      be_new = cluster.SimpleFillBE(i_bedict)
10082
      self.be_new = be_new # the new actual values
10083
      self.be_inst = i_bedict # the new dict (without defaults)
10084
    else:
10085
      self.be_new = self.be_inst = {}
10086

    
10087
    # osparams processing
10088
    if self.op.osparams:
10089
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
10090
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
10091
      self.os_inst = i_osdict # the new dict (without defaults)
10092
    else:
10093
      self.os_inst = {}
10094

    
10095
    self.warn = []
10096

    
10097
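    # Unless forced, make sure the new memory size still fits in the free
    # memory of the primary node (and of the secondaries too when the instance
    # is auto-balanced, so a failover remains possible)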
    if constants.BE_MEMORY in self.op.beparams and not self.op.force:
10098
      mem_check_list = [pnode]
10099
      if be_new[constants.BE_AUTO_BALANCE]:
10100
        # either we changed auto_balance to yes or it was from before
10101
        mem_check_list.extend(instance.secondary_nodes)
10102
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
10103
                                                  instance.hypervisor)
10104
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
10105
                                         instance.hypervisor)
10106
      pninfo = nodeinfo[pnode]
10107
      msg = pninfo.fail_msg
10108
      if msg:
10109
        # Assume the primary node is unreachable and go ahead
10110
        self.warn.append("Can't get info from primary node %s: %s" %
10111
                         (pnode,  msg))
10112
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
10113
        self.warn.append("Node data from primary node %s doesn't contain"
10114
                         " free memory information" % pnode)
10115
      elif instance_info.fail_msg:
10116
        self.warn.append("Can't get instance runtime information: %s" %
10117
                        instance_info.fail_msg)
10118
      else:
10119
        if instance_info.payload:
10120
          current_mem = int(instance_info.payload['memory'])
10121
        else:
10122
          # Assume instance not running
10123
          # (there is a slight race condition here, but it's not very probable,
10124
          # and we have no other way to check)
10125
          current_mem = 0
10126
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
10127
                    pninfo.payload['memory_free'])
10128
        if miss_mem > 0:
10129
          raise errors.OpPrereqError("This change will prevent the instance"
10130
                                     " from starting, due to %d MB of memory"
10131
                                     " missing on its primary node" % miss_mem,
10132
                                     errors.ECODE_NORES)
10133

    
10134
      if be_new[constants.BE_AUTO_BALANCE]:
10135
        for node, nres in nodeinfo.items():
10136
          if node not in instance.secondary_nodes:
10137
            continue
10138
          msg = nres.fail_msg
10139
          if msg:
10140
            self.warn.append("Can't get info from secondary node %s: %s" %
10141
                             (node, msg))
10142
          elif not isinstance(nres.payload.get('memory_free', None), int):
10143
            self.warn.append("Secondary node %s didn't return free"
10144
                             " memory information" % node)
10145
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
10146
            self.warn.append("Not enough memory to failover instance to"
10147
                             " secondary node %s" % node)
10148

    
10149
    # NIC processing
10150
    self.nic_pnew = {}
10151
    self.nic_pinst = {}
10152
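    # nic_pinst keeps the parameters exactly as submitted (these end up in the
    # configuration), nic_pnew the versions filled with cluster defaults, used
    # for validation and for the hooks environment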
    for nic_op, nic_dict in self.op.nics:
10153
      if nic_op == constants.DDM_REMOVE:
10154
        if not instance.nics:
10155
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
10156
                                     errors.ECODE_INVAL)
10157
        continue
10158
      if nic_op != constants.DDM_ADD:
10159
        # an existing nic
10160
        if not instance.nics:
10161
          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
10162
                                     " no NICs" % nic_op,
10163
                                     errors.ECODE_INVAL)
10164
        if nic_op < 0 or nic_op >= len(instance.nics):
10165
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
10166
                                     " are 0 to %d" %
10167
                                     (nic_op, len(instance.nics) - 1),
10168
                                     errors.ECODE_INVAL)
10169
        old_nic_params = instance.nics[nic_op].nicparams
10170
        old_nic_ip = instance.nics[nic_op].ip
10171
      else:
10172
        old_nic_params = {}
10173
        old_nic_ip = None
10174

    
10175
      update_params_dict = dict([(key, nic_dict[key])
10176
                                 for key in constants.NICS_PARAMETERS
10177
                                 if key in nic_dict])
10178

    
10179
      if 'bridge' in nic_dict:
10180
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
10181

    
10182
      new_nic_params = _GetUpdatedParams(old_nic_params,
10183
                                         update_params_dict)
10184
      utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
10185
      new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
10186
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
10187
      self.nic_pinst[nic_op] = new_nic_params
10188
      self.nic_pnew[nic_op] = new_filled_nic_params
10189
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
10190

    
10191
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
10192
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
10193
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
10194
        if msg:
10195
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
10196
          if self.op.force:
10197
            self.warn.append(msg)
10198
          else:
10199
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
10200
      if new_nic_mode == constants.NIC_MODE_ROUTED:
10201
        if constants.INIC_IP in nic_dict:
10202
          nic_ip = nic_dict[constants.INIC_IP]
10203
        else:
10204
          nic_ip = old_nic_ip
10205
        if nic_ip is None:
10206
          raise errors.OpPrereqError('Cannot set the nic ip to None'
10207
                                     ' on a routed nic', errors.ECODE_INVAL)
10208
      if constants.INIC_MAC in nic_dict:
10209
        nic_mac = nic_dict[constants.INIC_MAC]
10210
        if nic_mac is None:
10211
          raise errors.OpPrereqError('Cannot set the nic mac to None',
10212
                                     errors.ECODE_INVAL)
10213
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
10214
          # otherwise generate the mac
10215
          nic_dict[constants.INIC_MAC] = \
10216
            self.cfg.GenerateMAC(self.proc.GetECId())
10217
        else:
10218
          # or validate/reserve the current one
10219
          try:
10220
            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
10221
          except errors.ReservationError:
10222
            raise errors.OpPrereqError("MAC address %s already in use"
10223
                                       " in cluster" % nic_mac,
10224
                                       errors.ECODE_NOTUNIQUE)
10225

    
10226
    # DISK processing
10227
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
10228
      raise errors.OpPrereqError("Disk operations not supported for"
10229
                                 " diskless instances",
10230
                                 errors.ECODE_INVAL)
10231
    for disk_op, _ in self.op.disks:
10232
      if disk_op == constants.DDM_REMOVE:
10233
        if len(instance.disks) == 1:
10234
          raise errors.OpPrereqError("Cannot remove the last disk of"
10235
                                     " an instance", errors.ECODE_INVAL)
10236
        _CheckInstanceDown(self, instance, "cannot remove disks")
10237

    
10238
      if (disk_op == constants.DDM_ADD and
10239
          len(instance.disks) >= constants.MAX_DISKS):
10240
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
10241
                                   " add more" % constants.MAX_DISKS,
10242
                                   errors.ECODE_STATE)
10243
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
10244
        # an existing disk
10245
        if disk_op < 0 or disk_op >= len(instance.disks):
10246
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
10247
                                     " are 0 to %d" %
10248
                                     (disk_op, len(instance.disks)),
10249
                                     errors.ECODE_INVAL)
10250

    
10251
    return
10252

    
10253
  def _ConvertPlainToDrbd(self, feedback_fn):
10254
    """Converts an instance from plain to drbd.
10255

10256
    """
10257
    feedback_fn("Converting template to drbd")
10258
    instance = self.instance
10259
    pnode = instance.primary_node
10260
    snode = self.op.remote_node
10261

    
10262
    # create a fake disk info for _GenerateDiskTemplate
10263
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
10264
                  constants.IDISK_VG: d.logical_id[0]}
10265
                 for d in instance.disks]
10266
    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
10267
                                      instance.name, pnode, [snode],
10268
                                      disk_info, None, None, 0, feedback_fn)
10269
    info = _GetInstanceInfoText(instance)
10270
    feedback_fn("Creating aditional volumes...")
10271
    # first, create the missing data and meta devices
10272
    for disk in new_disks:
10273
      # unfortunately this is... not too nice
10274
      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
10275
                            info, True)
10276
      for child in disk.children:
10277
        _CreateSingleBlockDev(self, snode, instance, child, info, True)
10278
    # at this stage, all new LVs have been created, we can rename the
10279
    # old ones
10280
    feedback_fn("Renaming original volumes...")
10281
    rename_list = [(o, n.children[0].logical_id)
10282
                   for (o, n) in zip(instance.disks, new_disks)]
10283
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
10284
    result.Raise("Failed to rename original LVs")
10285

    
10286
    feedback_fn("Initializing DRBD devices...")
10287
    # all child devices are in place, we can now create the DRBD devices
10288
    for disk in new_disks:
10289
      for node in [pnode, snode]:
10290
        f_create = node == pnode
10291
        _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
10292

    
10293
    # at this point, the instance has been modified
10294
    instance.disk_template = constants.DT_DRBD8
10295
    instance.disks = new_disks
10296
    self.cfg.Update(instance, feedback_fn)
10297

    
10298
    # disks are created, waiting for sync
10299
    disk_abort = not _WaitForSync(self, instance,
10300
                                  oneshot=not self.op.wait_for_sync)
10301
    if disk_abort:
10302
      raise errors.OpExecError("There are some degraded disks for"
10303
                               " this instance, please cleanup manually")
10304

    
10305
  def _ConvertDrbdToPlain(self, feedback_fn):
10306
    """Converts an instance from drbd to plain.
10307

10308
    """
10309
    instance = self.instance
10310
    assert len(instance.secondary_nodes) == 1
10311
    pnode = instance.primary_node
10312
    snode = instance.secondary_nodes[0]
10313
    feedback_fn("Converting template to plain")
10314

    
10315
    old_disks = instance.disks
10316
    new_disks = [d.children[0] for d in old_disks]
10317

    
10318
    # copy over size and mode
10319
    for parent, child in zip(old_disks, new_disks):
10320
      child.size = parent.size
10321
      child.mode = parent.mode
10322

    
10323
    # update instance structure
10324
    instance.disks = new_disks
10325
    instance.disk_template = constants.DT_PLAIN
10326
    self.cfg.Update(instance, feedback_fn)
10327

    
10328
    feedback_fn("Removing volumes on the secondary node...")
10329
    for disk in old_disks:
10330
      self.cfg.SetDiskID(disk, snode)
10331
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
10332
      if msg:
10333
        self.LogWarning("Could not remove block device %s on node %s,"
10334
                        " continuing anyway: %s", disk.iv_name, snode, msg)
10335

    
10336
    feedback_fn("Removing unneeded volumes on the primary node...")
10337
    for idx, disk in enumerate(old_disks):
10338
      meta = disk.children[1]
10339
      self.cfg.SetDiskID(meta, pnode)
10340
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
10341
      if msg:
10342
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
10343
                        " continuing anyway: %s", idx, pnode, msg)
10344

    
10345
  def Exec(self, feedback_fn):
10346
    """Modifies an instance.
10347

10348
    All parameters take effect only at the next restart of the instance.
10349

10350
    """
10351
    # Process here the warnings from CheckPrereq, as we don't have a
10352
    # feedback_fn there.
10353
    for warn in self.warn:
10354
      feedback_fn("WARNING: %s" % warn)
10355

    
10356
    result = []
10357
    instance = self.instance
10358
    # disk changes
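    # DDM_REMOVE drops the last disk, DDM_ADD generates and creates a new one
    # on all nodes, and a plain index only changes the access mode of an
    # existing disk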
10359
    for disk_op, disk_dict in self.op.disks:
10360
      if disk_op == constants.DDM_REMOVE:
10361
        # remove the last disk
10362
        device = instance.disks.pop()
10363
        device_idx = len(instance.disks)
10364
        for node, disk in device.ComputeNodeTree(instance.primary_node):
10365
          self.cfg.SetDiskID(disk, node)
10366
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
10367
          if msg:
10368
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
10369
                            " continuing anyway", device_idx, node, msg)
10370
        result.append(("disk/%d" % device_idx, "remove"))
10371
      elif disk_op == constants.DDM_ADD:
10372
        # add a new disk
10373
        if instance.disk_template in (constants.DT_FILE,
10374
                                        constants.DT_SHARED_FILE):
10375
          file_driver, file_path = instance.disks[0].logical_id
10376
          file_path = os.path.dirname(file_path)
10377
        else:
10378
          file_driver = file_path = None
10379
        disk_idx_base = len(instance.disks)
10380
        new_disk = _GenerateDiskTemplate(self,
10381
                                         instance.disk_template,
10382
                                         instance.name, instance.primary_node,
10383
                                         instance.secondary_nodes,
10384
                                         [disk_dict],
10385
                                         file_path,
10386
                                         file_driver,
10387
                                         disk_idx_base, feedback_fn)[0]
10388
        instance.disks.append(new_disk)
10389
        info = _GetInstanceInfoText(instance)
10390

    
10391
        logging.info("Creating volume %s for instance %s",
10392
                     new_disk.iv_name, instance.name)
10393
        # Note: this needs to be kept in sync with _CreateDisks
10394
        #HARDCODE
10395
        for node in instance.all_nodes:
10396
          f_create = node == instance.primary_node
10397
          try:
10398
            _CreateBlockDev(self, node, instance, new_disk,
10399
                            f_create, info, f_create)
10400
          except errors.OpExecError, err:
10401
            self.LogWarning("Failed to create volume %s (%s) on"
10402
                            " node %s: %s",
10403
                            new_disk.iv_name, new_disk, node, err)
10404
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
10405
                       (new_disk.size, new_disk.mode)))
10406
      else:
10407
        # change a given disk
10408
        instance.disks[disk_op].mode = disk_dict[constants.IDISK_MODE]
10409
        result.append(("disk.mode/%d" % disk_op,
10410
                       disk_dict[constants.IDISK_MODE]))
10411

    
10412
    if self.op.disk_template:
10413
      r_shut = _ShutdownInstanceDisks(self, instance)
10414
      if not r_shut:
10415
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
10416
                                 " proceed with disk template conversion")
10417
      mode = (instance.disk_template, self.op.disk_template)
10418
      try:
10419
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
10420
      except:
10421
        self.cfg.ReleaseDRBDMinors(instance.name)
10422
        raise
10423
      result.append(("disk_template", self.op.disk_template))
10424

    
10425
    # NIC changes
10426
    for nic_op, nic_dict in self.op.nics:
10427
      if nic_op == constants.DDM_REMOVE:
10428
        # remove the last nic
10429
        del instance.nics[-1]
10430
        result.append(("nic.%d" % len(instance.nics), "remove"))
10431
      elif nic_op == constants.DDM_ADD:
10432
        # mac and bridge should be set, by now
10433
        mac = nic_dict[constants.INIC_MAC]
10434
        ip = nic_dict.get(constants.INIC_IP, None)
10435
        nicparams = self.nic_pinst[constants.DDM_ADD]
10436
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
10437
        instance.nics.append(new_nic)
10438
        result.append(("nic.%d" % (len(instance.nics) - 1),
10439
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
10440
                       (new_nic.mac, new_nic.ip,
10441
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
10442
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
10443
                       )))
10444
      else:
10445
        for key in (constants.INIC_MAC, constants.INIC_IP):
10446
          if key in nic_dict:
10447
            setattr(instance.nics[nic_op], key, nic_dict[key])
10448
        if nic_op in self.nic_pinst:
10449
          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
10450
        for key, val in nic_dict.iteritems():
10451
          result.append(("nic.%s/%d" % (key, nic_op), val))
10452

    
10453
    # hvparams changes
10454
    if self.op.hvparams:
10455
      instance.hvparams = self.hv_inst
10456
      for key, val in self.op.hvparams.iteritems():
10457
        result.append(("hv/%s" % key, val))
10458

    
10459
    # beparams changes
10460
    if self.op.beparams:
10461
      instance.beparams = self.be_inst
10462
      for key, val in self.op.beparams.iteritems():
10463
        result.append(("be/%s" % key, val))
10464

    
10465
    # OS change
10466
    if self.op.os_name:
10467
      instance.os = self.op.os_name
10468

    
10469
    # osparams changes
10470
    if self.op.osparams:
10471
      instance.osparams = self.os_inst
10472
      for key, val in self.op.osparams.iteritems():
10473
        result.append(("os/%s" % key, val))
10474

    
10475
    self.cfg.Update(instance, feedback_fn)
10476

    
10477
    return result
10478

    
10479
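  # Dispatch table for disk template conversions: maps a pair of
  # (current template, requested template) to the unbound method implementing
  # the conversion; Exec looks the pair up and calls it as
  # method(self, feedback_fn).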
  _DISK_CONVERSIONS = {
10480
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
10481
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
10482
    }
10483

    
10484

    
10485
class LUBackupQuery(NoHooksLU):
10486
  """Query the exports list
10487

10488
  """
10489
  REQ_BGL = False
10490

    
10491
  def ExpandNames(self):
10492
    self.needed_locks = {}
10493
    self.share_locks[locking.LEVEL_NODE] = 1
10494
    if not self.op.nodes:
10495
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
10496
    else:
10497
      self.needed_locks[locking.LEVEL_NODE] = \
10498
        _GetWantedNodes(self, self.op.nodes)
10499

    
10500
  def Exec(self, feedback_fn):
10501
    """Compute the list of all the exported system images.
10502

10503
    @rtype: dict
10504
    @return: a dictionary with the structure node->(export-list)
10505
        where export-list is a list of the instances exported on
10506
        that node.
10507

10508
    """
10509
    self.nodes = self.glm.list_owned(locking.LEVEL_NODE)
10510
    rpcresult = self.rpc.call_export_list(self.nodes)
10511
    result = {}
10512
    for node in rpcresult:
10513
      if rpcresult[node].fail_msg:
10514
        result[node] = False
10515
      else:
10516
        result[node] = rpcresult[node].payload
10517

    
10518
    return result
10519

    
10520

    
10521
class LUBackupPrepare(NoHooksLU):
10522
  """Prepares an instance for an export and returns useful information.
10523

10524
  """
10525
  REQ_BGL = False
10526

    
10527
  def ExpandNames(self):
10528
    self._ExpandAndLockInstance()
10529

    
10530
  def CheckPrereq(self):
10531
    """Check prerequisites.
10532

10533
    """
10534
    instance_name = self.op.instance_name
10535

    
10536
    self.instance = self.cfg.GetInstanceInfo(instance_name)
10537
    assert self.instance is not None, \
10538
          "Cannot retrieve locked instance %s" % self.op.instance_name
10539
    _CheckNodeOnline(self, self.instance.primary_node)
10540

    
10541
    self._cds = _GetClusterDomainSecret()
10542

    
10543
  def Exec(self, feedback_fn):
10544
    """Prepares an instance for an export.
10545

10546
    """
10547
    instance = self.instance
10548

    
10549
    if self.op.mode == constants.EXPORT_MODE_REMOTE:
10550
      salt = utils.GenerateSecret(8)
10551

    
10552
      feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
10553
      result = self.rpc.call_x509_cert_create(instance.primary_node,
10554
                                              constants.RIE_CERT_VALIDITY)
10555
      result.Raise("Can't create X509 key and certificate on %s" % result.node)
10556

    
10557
      (name, cert_pem) = result.payload
10558

    
10559
      cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
10560
                                             cert_pem)
10561

    
10562
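      # The key name and the CA are authenticated with the cluster domain
      # secret, so the destination of a remote export can check that they
      # really originate from this cluster.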
      return {
10563
        "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
10564
        "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
10565
                          salt),
10566
        "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
10567
        }
10568

    
10569
    return None
10570

    
10571

    
10572
class LUBackupExport(LogicalUnit):
10573
  """Export an instance to an image in the cluster.
10574

10575
  """
10576
  HPATH = "instance-export"
10577
  HTYPE = constants.HTYPE_INSTANCE
10578
  REQ_BGL = False
10579

    
10580
  def CheckArguments(self):
10581
    """Check the arguments.
10582

10583
    """
10584
    self.x509_key_name = self.op.x509_key_name
10585
    self.dest_x509_ca_pem = self.op.destination_x509_ca
10586

    
10587
    if self.op.mode == constants.EXPORT_MODE_REMOTE:
10588
      if not self.x509_key_name:
10589
        raise errors.OpPrereqError("Missing X509 key name for encryption",
10590
                                   errors.ECODE_INVAL)
10591

    
10592
      if not self.dest_x509_ca_pem:
10593
        raise errors.OpPrereqError("Missing destination X509 CA",
10594
                                   errors.ECODE_INVAL)
10595

    
10596
  def ExpandNames(self):
10597
    self._ExpandAndLockInstance()
10598

    
10599
    # Lock all nodes for local exports
10600
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
10601
      # FIXME: lock only instance primary and destination node
10602
      #
10603
      # Sad but true, for now we have to lock all nodes, as we don't know where
10604
      # the previous export might be, and in this LU we search for it and
10605
      # remove it from its current node. In the future we could fix this by:
10606
      #  - making a tasklet to search (share-lock all), then create the
10607
      #    new one, then one to remove, after
10608
      #  - removing the removal operation altogether
10609
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
10610

    
10611
  def DeclareLocks(self, level):
10612
    """Last minute lock declaration."""
10613
    # All nodes are locked anyway, so nothing to do here.
10614

    
10615
  def BuildHooksEnv(self):
10616
    """Build hooks env.
10617

10618
    This will run on the master, primary node and target node.
10619

10620
    """
10621
    env = {
10622
      "EXPORT_MODE": self.op.mode,
10623
      "EXPORT_NODE": self.op.target_node,
10624
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
10625
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
10626
      # TODO: Generic function for boolean env variables
10627
      "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
10628
      }
10629

    
10630
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
10631

    
10632
    return env
10633

    
10634
  def BuildHooksNodes(self):
10635
    """Build hooks nodes.
10636

10637
    """
10638
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
10639

    
10640
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
10641
      nl.append(self.op.target_node)
10642

    
10643
    return (nl, nl)
10644

    
10645
  def CheckPrereq(self):
10646
    """Check prerequisites.
10647

10648
    This checks that the instance and node names are valid.
10649

10650
    """
10651
    instance_name = self.op.instance_name
10652

    
10653
    self.instance = self.cfg.GetInstanceInfo(instance_name)
10654
    assert self.instance is not None, \
10655
          "Cannot retrieve locked instance %s" % self.op.instance_name
10656
    _CheckNodeOnline(self, self.instance.primary_node)
10657

    
10658
    if (self.op.remove_instance and self.instance.admin_up and
10659
        not self.op.shutdown):
10660
      raise errors.OpPrereqError("Can not remove instance without shutting it"
10661
                                 " down before")
10662

    
10663
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
10664
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
10665
      self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
10666
      assert self.dst_node is not None
10667

    
10668
      _CheckNodeOnline(self, self.dst_node.name)
10669
      _CheckNodeNotDrained(self, self.dst_node.name)
10670

    
10671
      self._cds = None
10672
      self.dest_disk_info = None
10673
      self.dest_x509_ca = None
10674

    
10675
    elif self.op.mode == constants.EXPORT_MODE_REMOTE:
10676
      self.dst_node = None
10677

    
10678
      if len(self.op.target_node) != len(self.instance.disks):
10679
        raise errors.OpPrereqError(("Received destination information for %s"
10680
                                    " disks, but instance %s has %s disks") %
10681
                                   (len(self.op.target_node), instance_name,
10682
                                    len(self.instance.disks)),
10683
                                   errors.ECODE_INVAL)
10684

    
10685
      cds = _GetClusterDomainSecret()
10686

    
10687
      # Check X509 key name
10688
      try:
10689
        (key_name, hmac_digest, hmac_salt) = self.x509_key_name
10690
      except (TypeError, ValueError), err:
10691
        raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)
10692

    
10693
      if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
10694
        raise errors.OpPrereqError("HMAC for X509 key name is wrong",
10695
                                   errors.ECODE_INVAL)
10696

    
10697
      # Load and verify CA
10698
      try:
10699
        (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
10700
      except OpenSSL.crypto.Error, err:
10701
        raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
10702
                                   (err, ), errors.ECODE_INVAL)
10703

    
10704
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
10705
      if errcode is not None:
10706
        raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
10707
                                   (msg, ), errors.ECODE_INVAL)
10708

    
10709
      self.dest_x509_ca = cert
10710

    
10711
      # Verify target information
10712
      disk_info = []
10713
      for idx, disk_data in enumerate(self.op.target_node):
10714
        try:
10715
          (host, port, magic) = \
10716
            masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
10717
        except errors.GenericError, err:
10718
          raise errors.OpPrereqError("Target info for disk %s: %s" %
10719
                                     (idx, err), errors.ECODE_INVAL)
10720

    
10721
        disk_info.append((host, port, magic))
10722

    
10723
      assert len(disk_info) == len(self.op.target_node)
10724
      self.dest_disk_info = disk_info
10725

    
10726
    else:
10727
      raise errors.ProgrammerError("Unhandled export mode %r" %
10728
                                   self.op.mode)
10729

    
10730
    # instance disk type verification
10731
    # TODO: Implement export support for file-based disks
10732
    for disk in self.instance.disks:
10733
      if disk.dev_type == constants.LD_FILE:
10734
        raise errors.OpPrereqError("Export not supported for instances with"
10735
                                   " file-based disks", errors.ECODE_INVAL)
10736

    
10737
  def _CleanupExports(self, feedback_fn):
10738
    """Removes exports of current instance from all other nodes.
10739

10740
    If an instance in a cluster with nodes A..D was exported to node C, its
10741
    exports will be removed from the nodes A, B and D.
10742

10743
    """
10744
    assert self.op.mode != constants.EXPORT_MODE_REMOTE
10745

    
10746
    nodelist = self.cfg.GetNodeList()
10747
    nodelist.remove(self.dst_node.name)
10748

    
10749
    # on one-node clusters nodelist will be empty after the removal
10750
    # if we proceed the backup would be removed because OpBackupQuery
10751
    # substitutes an empty list with the full cluster node list.
10752
    iname = self.instance.name
10753
    if nodelist:
10754
      feedback_fn("Removing old exports for instance %s" % iname)
10755
      exportlist = self.rpc.call_export_list(nodelist)
10756
      for node in exportlist:
10757
        if exportlist[node].fail_msg:
10758
          continue
10759
        if iname in exportlist[node].payload:
10760
          msg = self.rpc.call_export_remove(node, iname).fail_msg
10761
          if msg:
10762
            self.LogWarning("Could not remove older export for instance %s"
10763
                            " on node %s: %s", iname, node, msg)
10764

    
10765
  def Exec(self, feedback_fn):
10766
    """Export an instance to an image in the cluster.
10767

10768
    """
10769
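    # Rough flow: optionally shut the instance down, snapshot its disks,
    # restart it if it was running, stream the snapshots to the destination
    # (a node of this cluster or a remote cluster), and finally clean up old
    # exports and/or remove the instance as requested.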
    assert self.op.mode in constants.EXPORT_MODES
10770

    
10771
    instance = self.instance
10772
    src_node = instance.primary_node
10773

    
10774
    if self.op.shutdown:
10775
      # shutdown the instance, but not the disks
10776
      feedback_fn("Shutting down instance %s" % instance.name)
10777
      result = self.rpc.call_instance_shutdown(src_node, instance,
10778
                                               self.op.shutdown_timeout)
10779
      # TODO: Maybe ignore failures if ignore_remove_failures is set
10780
      result.Raise("Could not shutdown instance %s on"
10781
                   " node %s" % (instance.name, src_node))
10782

    
10783
    # set the disks ID correctly since call_instance_start needs the
10784
    # correct drbd minor to create the symlinks
10785
    for disk in instance.disks:
10786
      self.cfg.SetDiskID(disk, src_node)
10787

    
10788
    activate_disks = (not instance.admin_up)
10789

    
10790
    if activate_disks:
10791
      # Activate the instance disks if we're exporting a stopped instance
10792
      feedback_fn("Activating disks for %s" % instance.name)
10793
      _StartInstanceDisks(self, instance, None)
10794

    
10795
    try:
10796
      helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
10797
                                                     instance)
10798

    
10799
      helper.CreateSnapshots()
10800
      try:
10801
        if (self.op.shutdown and instance.admin_up and
10802
            not self.op.remove_instance):
10803
          assert not activate_disks
10804
          feedback_fn("Starting instance %s" % instance.name)
10805
          result = self.rpc.call_instance_start(src_node, instance, None, None)
10806
          msg = result.fail_msg
10807
          if msg:
10808
            feedback_fn("Failed to start instance: %s" % msg)
10809
            _ShutdownInstanceDisks(self, instance)
10810
            raise errors.OpExecError("Could not start instance: %s" % msg)
10811

    
10812
        if self.op.mode == constants.EXPORT_MODE_LOCAL:
10813
          (fin_resu, dresults) = helper.LocalExport(self.dst_node)
10814
        elif self.op.mode == constants.EXPORT_MODE_REMOTE:
10815
          connect_timeout = constants.RIE_CONNECT_TIMEOUT
10816
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
10817

    
10818
          (key_name, _, _) = self.x509_key_name
10819

    
10820
          dest_ca_pem = \
10821
            OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
10822
                                            self.dest_x509_ca)
10823

    
10824
          (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
10825
                                                     key_name, dest_ca_pem,
10826
                                                     timeouts)
10827
      finally:
10828
        helper.Cleanup()
10829

    
10830
      # Check for backwards compatibility
10831
      assert len(dresults) == len(instance.disks)
10832
      assert compat.all(isinstance(i, bool) for i in dresults), \
10833
             "Not all results are boolean: %r" % dresults
10834

    
10835
    finally:
10836
      if activate_disks:
10837
        feedback_fn("Deactivating disks for %s" % instance.name)
10838
        _ShutdownInstanceDisks(self, instance)
10839

    
10840
    if not (compat.all(dresults) and fin_resu):
10841
      failures = []
10842
      if not fin_resu:
10843
        failures.append("export finalization")
10844
      if not compat.all(dresults):
10845
        fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
10846
                               if not dsk)
10847
        failures.append("disk export: disk(s) %s" % fdsk)
10848

    
10849
      raise errors.OpExecError("Export failed, errors in %s" %
10850
                               utils.CommaJoin(failures))
10851

    
10852
    # At this point, the export was successful, we can cleanup/finish
10853

    
10854
    # Remove instance if requested
10855
    if self.op.remove_instance:
10856
      feedback_fn("Removing instance %s" % instance.name)
10857
      _RemoveInstance(self, feedback_fn, instance,
10858
                      self.op.ignore_remove_failures)
10859

    
10860
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
10861
      self._CleanupExports(feedback_fn)
10862

    
10863
    return fin_resu, dresults
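  # Illustrative note (not in the original source): per the assertions above,
  # the return value pairs the finalization status with one boolean per disk,
  # e.g. (True, [True, True]) for a cleanly exported two-disk instance.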


class LUBackupRemove(NoHooksLU):
10867
  """Remove exports related to the named instance.
10868

10869
  """
10870
  REQ_BGL = False
10871

    
10872
  def ExpandNames(self):
10873
    self.needed_locks = {}
10874
    # We need all nodes to be locked in order for RemoveExport to work, but we
10875
    # don't need to lock the instance itself, as nothing will happen to it (and
10876
    # we can remove exports also for a removed instance)
10877
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
10878

    
10879
  def Exec(self, feedback_fn):
10880
    """Remove any export.
10881

10882
    """
10883
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
10884
    # If the instance was not found we'll try with the name that was passed in.
10885
    # This will only work if it was an FQDN, though.
10886
    fqdn_warn = False
10887
    if not instance_name:
10888
      fqdn_warn = True
10889
      instance_name = self.op.instance_name
10890

    
10891
    locked_nodes = self.glm.list_owned(locking.LEVEL_NODE)
10892
    exportlist = self.rpc.call_export_list(locked_nodes)
10893
    found = False
10894
    for node in exportlist:
10895
      msg = exportlist[node].fail_msg
10896
      if msg:
10897
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
10898
        continue
10899
      if instance_name in exportlist[node].payload:
10900
        found = True
10901
        result = self.rpc.call_export_remove(node, instance_name)
10902
        msg = result.fail_msg
10903
        if msg:
10904
          logging.error("Could not remove export for instance %s"
10905
                        " on node %s: %s", instance_name, node, msg)
10906

    
10907
    if fqdn_warn and not found:
10908
      feedback_fn("Export not found. If trying to remove an export belonging"
10909
                  " to a deleted instance please use its Fully Qualified"
10910
                  " Domain Name.")


class LUGroupAdd(LogicalUnit):
10914
  """Logical unit for creating node groups.
10915

10916
  """
10917
  HPATH = "group-add"
10918
  HTYPE = constants.HTYPE_GROUP
10919
  REQ_BGL = False
10920

    
10921
  def ExpandNames(self):
10922
    # We need the new group's UUID here so that we can create and acquire the
10923
    # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
10924
    # that it should not check whether the UUID exists in the configuration.
10925
    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
10926
    self.needed_locks = {}
10927
    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
10928

    
10929
  def CheckPrereq(self):
10930
    """Check prerequisites.
10931

10932
    This checks that the given group name does not already exist as a node
    group.
10934

10935
    """
10936
    try:
10937
      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10938
    except errors.OpPrereqError:
10939
      pass
10940
    else:
10941
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
10942
                                 " node group (UUID: %s)" %
10943
                                 (self.op.group_name, existing_uuid),
10944
                                 errors.ECODE_EXISTS)
10945

    
10946
    if self.op.ndparams:
10947
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
10948

    
10949
  def BuildHooksEnv(self):
10950
    """Build hooks env.
10951

10952
    """
10953
    return {
10954
      "GROUP_NAME": self.op.group_name,
10955
      }
10956

    
10957
  def BuildHooksNodes(self):
10958
    """Build hooks nodes.
10959

10960
    """
10961
    mn = self.cfg.GetMasterNode()
10962
    return ([mn], [mn])
10963

    
10964
  def Exec(self, feedback_fn):
10965
    """Add the node group to the cluster.
10966

10967
    """
10968
    group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
10969
                                  uuid=self.group_uuid,
10970
                                  alloc_policy=self.op.alloc_policy,
10971
                                  ndparams=self.op.ndparams)
10972

    
10973
    self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
10974
    del self.remove_locks[locking.LEVEL_NODEGROUP]


class LUGroupAssignNodes(NoHooksLU):
10978
  """Logical unit for assigning nodes to groups.
10979

10980
  """
10981
  REQ_BGL = False
10982

    
10983
  def ExpandNames(self):
10984
    # These raise errors.OpPrereqError on their own:
10985
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10986
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
10987

    
10988
    # We want to lock all the affected nodes and groups. We have readily
10989
    # available the list of nodes, and the *destination* group. To gather the
10990
    # list of "source" groups, we need to fetch node information later on.
10991
    self.needed_locks = {
10992
      locking.LEVEL_NODEGROUP: set([self.group_uuid]),
10993
      locking.LEVEL_NODE: self.op.nodes,
10994
      }
10995

    
10996
  def DeclareLocks(self, level):
10997
    if level == locking.LEVEL_NODEGROUP:
10998
      assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1
10999

    
11000
      # Try to get all affected nodes' groups without having the group or node
11001
      # lock yet. Needs verification later in the code flow.
11002
      groups = self.cfg.GetNodeGroupsFromNodes(self.op.nodes)
11003

    
11004
      self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
11005

    
11006
  def CheckPrereq(self):
11007
    """Check prerequisites.
11008

11009
    """
11010
    assert self.needed_locks[locking.LEVEL_NODEGROUP]
11011
    assert (frozenset(self.glm.list_owned(locking.LEVEL_NODE)) ==
11012
            frozenset(self.op.nodes))
11013

    
11014
    expected_locks = (set([self.group_uuid]) |
11015
                      self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
11016
    actual_locks = self.glm.list_owned(locking.LEVEL_NODEGROUP)
11017
    if actual_locks != expected_locks:
11018
      raise errors.OpExecError("Nodes changed groups since locks were acquired,"
11019
                               " current groups are '%s', used to be '%s'" %
11020
                               (utils.CommaJoin(expected_locks),
11021
                                utils.CommaJoin(actual_locks)))
11022

    
11023
    self.node_data = self.cfg.GetAllNodesInfo()
11024
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
11025
    instance_data = self.cfg.GetAllInstancesInfo()
11026

    
11027
    if self.group is None:
11028
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
11029
                               (self.op.group_name, self.group_uuid))
11030

    
11031
    (new_splits, previous_splits) = \
11032
      self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
11033
                                             for node in self.op.nodes],
11034
                                            self.node_data, instance_data)
11035

    
11036
    if new_splits:
11037
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
11038

    
11039
      if not self.op.force:
11040
        raise errors.OpExecError("The following instances get split by this"
11041
                                 " change and --force was not given: %s" %
11042
                                 fmt_new_splits)
11043
      else:
11044
        self.LogWarning("This operation will split the following instances: %s",
11045
                        fmt_new_splits)
11046

    
11047
        if previous_splits:
11048
          self.LogWarning("In addition, these already-split instances continue"
11049
                          " to be split across groups: %s",
11050
                          utils.CommaJoin(utils.NiceSort(previous_splits)))
11051

    
11052
  def Exec(self, feedback_fn):
11053
    """Assign nodes to a new group.
11054

11055
    """
11056
    for node in self.op.nodes:
11057
      self.node_data[node].group = self.group_uuid
11058

    
11059
    # FIXME: Depends on side-effects of modifying the result of
11060
    # C{cfg.GetAllNodesInfo}
11061

    
11062
    self.cfg.Update(self.group, feedback_fn) # Saves all modified nodes.
11063

    
11064
  @staticmethod
11065
  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
11066
    """Check for split instances after a node assignment.
11067

11068
    This method considers a series of node assignments as an atomic operation,
11069
    and returns information about split instances after applying the set of
11070
    changes.
11071

11072
    In particular, it returns information about newly split instances, and
11073
    instances that were already split, and remain so after the change.
11074

11075
    Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
11076
    considered.
11077

11078
    @type changes: list of (node_name, new_group_uuid) pairs.
11079
    @param changes: list of node assignments to consider.
11080
    @param node_data: a dict with data for all nodes
11081
    @param instance_data: a dict with all instances to consider
11082
    @rtype: a two-tuple
11083
    @return: a list of instances that were previously okay and become split as
      a consequence of this change, and a list of instances that were
      previously split and that this change does not fix.
11086

11087
    """
11088
    changed_nodes = dict((node, group) for node, group in changes
11089
                         if node_data[node].group != group)
11090

    
11091
    all_split_instances = set()
11092
    previously_split_instances = set()
11093

    
11094
    def InstanceNodes(instance):
11095
      return [instance.primary_node] + list(instance.secondary_nodes)
11096

    
11097
    for inst in instance_data.values():
11098
      if inst.disk_template not in constants.DTS_INT_MIRROR:
11099
        continue
11100

    
11101
      instance_nodes = InstanceNodes(inst)
11102

    
11103
      if len(set(node_data[node].group for node in instance_nodes)) > 1:
11104
        previously_split_instances.add(inst.name)
11105

    
11106
      if len(set(changed_nodes.get(node, node_data[node].group)
11107
                 for node in instance_nodes)) > 1:
11108
        all_split_instances.add(inst.name)
11109

    
11110
    return (list(all_split_instances - previously_split_instances),
11111
            list(previously_split_instances & all_split_instances))
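  # Illustrative sketch (not part of the original module), using hypothetical
  # data: assume "inst1" is a DRBD instance on node1/node2 (both currently in
  # group "g1") and "inst2" already spans groups "g1"/"g3".  Moving node2 to
  # "g2" via changes = [("node2", "g2")] would make
  # CheckAssignmentForSplitInstances(changes, node_data, instance_data)
  # return (["inst1"], ["inst2"]): inst1 becomes newly split, inst2 stays
  # split.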


class _GroupQuery(_QueryBase):
11115
  FIELDS = query.GROUP_FIELDS
11116

    
11117
  def ExpandNames(self, lu):
11118
    lu.needed_locks = {}
11119

    
11120
    self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
11121
    name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
11122

    
11123
    if not self.names:
11124
      self.wanted = [name_to_uuid[name]
11125
                     for name in utils.NiceSort(name_to_uuid.keys())]
11126
    else:
11127
      # Accept names as either group names or UUIDs.
11128
      missing = []
11129
      self.wanted = []
11130
      all_uuid = frozenset(self._all_groups.keys())
11131

    
11132
      for name in self.names:
11133
        if name in all_uuid:
11134
          self.wanted.append(name)
11135
        elif name in name_to_uuid:
11136
          self.wanted.append(name_to_uuid[name])
11137
        else:
11138
          missing.append(name)
11139

    
11140
      if missing:
11141
        raise errors.OpPrereqError("Some groups do not exist: %s" %
11142
                                   utils.CommaJoin(missing),
11143
                                   errors.ECODE_NOENT)
11144

    
11145
  def DeclareLocks(self, lu, level):
11146
    pass
11147

    
11148
  def _GetQueryData(self, lu):
11149
    """Computes the list of node groups and their attributes.
11150

11151
    """
11152
    do_nodes = query.GQ_NODE in self.requested_data
11153
    do_instances = query.GQ_INST in self.requested_data
11154

    
11155
    group_to_nodes = None
11156
    group_to_instances = None
11157

    
11158
    # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
11159
    # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
11160
    # latter GetAllInstancesInfo() is not enough, for we have to go through
11161
    # instance->node. Hence, we will need to process nodes even if we only need
11162
    # instance information.
11163
    if do_nodes or do_instances:
11164
      all_nodes = lu.cfg.GetAllNodesInfo()
11165
      group_to_nodes = dict((uuid, []) for uuid in self.wanted)
11166
      node_to_group = {}
11167

    
11168
      for node in all_nodes.values():
11169
        if node.group in group_to_nodes:
11170
          group_to_nodes[node.group].append(node.name)
11171
          node_to_group[node.name] = node.group
11172

    
11173
      if do_instances:
11174
        all_instances = lu.cfg.GetAllInstancesInfo()
11175
        group_to_instances = dict((uuid, []) for uuid in self.wanted)
11176

    
11177
        for instance in all_instances.values():
11178
          node = instance.primary_node
11179
          if node in node_to_group:
11180
            group_to_instances[node_to_group[node]].append(instance.name)
11181

    
11182
        if not do_nodes:
11183
          # Do not pass on node information if it was not requested.
11184
          group_to_nodes = None
11185

    
11186
    return query.GroupQueryData([self._all_groups[uuid]
11187
                                 for uuid in self.wanted],
11188
                                group_to_nodes, group_to_instances)
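  # Illustrative note (not in the original source): when both GQ_NODE and
  # GQ_INST are requested, the two maps built above look roughly like
  #   group_to_nodes     = {"<group-uuid>": ["node1", "node2"]}
  #   group_to_instances = {"<group-uuid>": ["inst1"]}
  # i.e. keyed by group UUID and limited to the wanted groups.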


class LUGroupQuery(NoHooksLU):
11192
  """Logical unit for querying node groups.
11193

11194
  """
11195
  REQ_BGL = False
11196

    
11197
  def CheckArguments(self):
11198
    self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
11199
                          self.op.output_fields, False)
11200

    
11201
  def ExpandNames(self):
11202
    self.gq.ExpandNames(self)
11203

    
11204
  def Exec(self, feedback_fn):
11205
    return self.gq.OldStyleQuery(self)


class LUGroupSetParams(LogicalUnit):
11209
  """Modifies the parameters of a node group.
11210

11211
  """
11212
  HPATH = "group-modify"
11213
  HTYPE = constants.HTYPE_GROUP
11214
  REQ_BGL = False
11215

    
11216
  def CheckArguments(self):
11217
    all_changes = [
11218
      self.op.ndparams,
11219
      self.op.alloc_policy,
11220
      ]
11221

    
11222
    if all_changes.count(None) == len(all_changes):
11223
      raise errors.OpPrereqError("Please pass at least one modification",
11224
                                 errors.ECODE_INVAL)
11225

    
11226
  def ExpandNames(self):
11227
    # This raises errors.OpPrereqError on its own:
11228
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
11229

    
11230
    self.needed_locks = {
11231
      locking.LEVEL_NODEGROUP: [self.group_uuid],
11232
      }
11233

    
11234
  def CheckPrereq(self):
11235
    """Check prerequisites.
11236

11237
    """
11238
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
11239

    
11240
    if self.group is None:
11241
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
11242
                               (self.op.group_name, self.group_uuid))
11243

    
11244
    if self.op.ndparams:
11245
      new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
11246
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
11247
      self.new_ndparams = new_ndparams
11248

    
11249
  def BuildHooksEnv(self):
11250
    """Build hooks env.
11251

11252
    """
11253
    return {
11254
      "GROUP_NAME": self.op.group_name,
11255
      "NEW_ALLOC_POLICY": self.op.alloc_policy,
11256
      }
11257

    
11258
  def BuildHooksNodes(self):
11259
    """Build hooks nodes.
11260

11261
    """
11262
    mn = self.cfg.GetMasterNode()
11263
    return ([mn], [mn])
11264

    
11265
  def Exec(self, feedback_fn):
11266
    """Modifies the node group.
11267

11268
    """
11269
    result = []
11270

    
11271
    if self.op.ndparams:
11272
      self.group.ndparams = self.new_ndparams
11273
      result.append(("ndparams", str(self.group.ndparams)))
11274

    
11275
    if self.op.alloc_policy:
11276
      self.group.alloc_policy = self.op.alloc_policy
11277

    
11278
    self.cfg.Update(self.group, feedback_fn)
11279
    return result
11280

    
11281

    
11282

    
11283
class LUGroupRemove(LogicalUnit):
11284
  HPATH = "group-remove"
11285
  HTYPE = constants.HTYPE_GROUP
11286
  REQ_BGL = False
11287

    
11288
  def ExpandNames(self):
11289
    # This raises errors.OpPrereqError on its own:
11290
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
11291
    self.needed_locks = {
11292
      locking.LEVEL_NODEGROUP: [self.group_uuid],
11293
      }
11294

    
11295
  def CheckPrereq(self):
11296
    """Check prerequisites.
11297

11298
    This checks that the given group name exists as a node group, that it is
    empty (i.e., contains no nodes), and that it is not the last group of the
    cluster.
11301

11302
    """
11303
    # Verify that the group is empty.
11304
    group_nodes = [node.name
11305
                   for node in self.cfg.GetAllNodesInfo().values()
11306
                   if node.group == self.group_uuid]
11307

    
11308
    if group_nodes:
11309
      raise errors.OpPrereqError("Group '%s' not empty, has the following"
11310
                                 " nodes: %s" %
11311
                                 (self.op.group_name,
11312
                                  utils.CommaJoin(utils.NiceSort(group_nodes))),
11313
                                 errors.ECODE_STATE)
11314

    
11315
    # Verify the cluster would not be left group-less.
11316
    if len(self.cfg.GetNodeGroupList()) == 1:
11317
      raise errors.OpPrereqError("Group '%s' is the only group,"
11318
                                 " cannot be removed" %
11319
                                 self.op.group_name,
11320
                                 errors.ECODE_STATE)
11321

    
11322
  def BuildHooksEnv(self):
11323
    """Build hooks env.
11324

11325
    """
11326
    return {
11327
      "GROUP_NAME": self.op.group_name,
11328
      }
11329

    
11330
  def BuildHooksNodes(self):
11331
    """Build hooks nodes.
11332

11333
    """
11334
    mn = self.cfg.GetMasterNode()
11335
    return ([mn], [mn])
11336

    
11337
  def Exec(self, feedback_fn):
11338
    """Remove the node group.
11339

11340
    """
11341
    try:
11342
      self.cfg.RemoveNodeGroup(self.group_uuid)
11343
    except errors.ConfigurationError:
11344
      raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
11345
                               (self.op.group_name, self.group_uuid))
11346

    
11347
    self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
11348

    
11349

    
11350
class LUGroupRename(LogicalUnit):
11351
  HPATH = "group-rename"
11352
  HTYPE = constants.HTYPE_GROUP
11353
  REQ_BGL = False
11354

    
11355
  def ExpandNames(self):
11356
    # This raises errors.OpPrereqError on its own:
11357
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
11358

    
11359
    self.needed_locks = {
11360
      locking.LEVEL_NODEGROUP: [self.group_uuid],
11361
      }
11362

    
11363
  def CheckPrereq(self):
11364
    """Check prerequisites.
11365

11366
    Ensures requested new name is not yet used.
11367

11368
    """
11369
    try:
11370
      new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
11371
    except errors.OpPrereqError:
11372
      pass
11373
    else:
11374
      raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
11375
                                 " node group (UUID: %s)" %
11376
                                 (self.op.new_name, new_name_uuid),
11377
                                 errors.ECODE_EXISTS)
11378

    
11379
  def BuildHooksEnv(self):
11380
    """Build hooks env.
11381

11382
    """
11383
    return {
11384
      "OLD_NAME": self.op.group_name,
11385
      "NEW_NAME": self.op.new_name,
11386
      }
11387

    
11388
  def BuildHooksNodes(self):
11389
    """Build hooks nodes.
11390

11391
    """
11392
    mn = self.cfg.GetMasterNode()
11393

    
11394
    all_nodes = self.cfg.GetAllNodesInfo()
11395
    all_nodes.pop(mn, None)
11396

    
11397
    run_nodes = [mn]
11398
    run_nodes.extend(node.name for node in all_nodes.values()
11399
                     if node.group == self.group_uuid)
11400

    
11401
    return (run_nodes, run_nodes)
11402

    
11403
  def Exec(self, feedback_fn):
11404
    """Rename the node group.
11405

11406
    """
11407
    group = self.cfg.GetNodeGroup(self.group_uuid)
11408

    
11409
    if group is None:
11410
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
11411
                               (self.op.group_name, self.group_uuid))
11412

    
11413
    group.name = self.op.new_name
11414
    self.cfg.Update(group, feedback_fn)
11415

    
11416
    return self.op.new_name
11417

    
11418

    
11419
class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
11420
  """Generic tags LU.
11421

11422
  This is an abstract class which is the parent of all the other tags LUs.
11423

11424
  """
11425
  def ExpandNames(self):
11426
    self.group_uuid = None
11427
    self.needed_locks = {}
11428
    if self.op.kind == constants.TAG_NODE:
11429
      self.op.name = _ExpandNodeName(self.cfg, self.op.name)
11430
      self.needed_locks[locking.LEVEL_NODE] = self.op.name
11431
    elif self.op.kind == constants.TAG_INSTANCE:
11432
      self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
11433
      self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
11434
    elif self.op.kind == constants.TAG_NODEGROUP:
11435
      self.group_uuid = self.cfg.LookupNodeGroup(self.op.name)
11436

    
11437
    # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
11438
    # not possible to acquire the BGL based on opcode parameters)
11439

    
11440
  def CheckPrereq(self):
11441
    """Check prerequisites.
11442

11443
    """
11444
    if self.op.kind == constants.TAG_CLUSTER:
11445
      self.target = self.cfg.GetClusterInfo()
11446
    elif self.op.kind == constants.TAG_NODE:
11447
      self.target = self.cfg.GetNodeInfo(self.op.name)
11448
    elif self.op.kind == constants.TAG_INSTANCE:
11449
      self.target = self.cfg.GetInstanceInfo(self.op.name)
11450
    elif self.op.kind == constants.TAG_NODEGROUP:
11451
      self.target = self.cfg.GetNodeGroup(self.group_uuid)
11452
    else:
11453
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
11454
                                 str(self.op.kind), errors.ECODE_INVAL)
11455

    
11456

    
11457
class LUTagsGet(TagsLU):
11458
  """Returns the tags of a given object.
11459

11460
  """
11461
  REQ_BGL = False
11462

    
11463
  def ExpandNames(self):
11464
    TagsLU.ExpandNames(self)
11465

    
11466
    # Share locks as this is only a read operation
11467
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
11468

    
11469
  def Exec(self, feedback_fn):
11470
    """Returns the tag list.
11471

11472
    """
11473
    return list(self.target.GetTags())
11474

    
11475

    
11476
class LUTagsSearch(NoHooksLU):
11477
  """Searches the tags for a given pattern.
11478

11479
  """
11480
  REQ_BGL = False
11481

    
11482
  def ExpandNames(self):
11483
    self.needed_locks = {}
11484

    
11485
  def CheckPrereq(self):
11486
    """Check prerequisites.
11487

11488
    This checks the pattern passed for validity by compiling it.
11489

11490
    """
11491
    try:
11492
      self.re = re.compile(self.op.pattern)
11493
    except re.error, err:
11494
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
11495
                                 (self.op.pattern, err), errors.ECODE_INVAL)
11496

    
11497
  def Exec(self, feedback_fn):
11498
    """Returns the tag list.
11499

11500
    """
11501
    cfg = self.cfg
11502
    tgts = [("/cluster", cfg.GetClusterInfo())]
11503
    ilist = cfg.GetAllInstancesInfo().values()
11504
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
11505
    nlist = cfg.GetAllNodesInfo().values()
11506
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
11507
    tgts.extend(("/nodegroup/%s" % n.name, n)
11508
                for n in cfg.GetAllNodeGroupsInfo().values())
11509
    results = []
11510
    for path, target in tgts:
11511
      for tag in target.GetTags():
11512
        if self.re.search(tag):
11513
          results.append((path, tag))
11514
    return results
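  # Illustrative note (not in the original source): the result is a list of
  # (path, tag) pairs, e.g.
  #   [("/cluster", "prod"), ("/instances/inst1", "prod")]
  # for a hypothetical pattern such as "^prod$".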


class LUTagsSet(TagsLU):
11518
  """Sets a tag on a given object.
11519

11520
  """
11521
  REQ_BGL = False
11522

    
11523
  def CheckPrereq(self):
11524
    """Check prerequisites.
11525

11526
    This checks the type and length of the tag name and value.
11527

11528
    """
11529
    TagsLU.CheckPrereq(self)
11530
    for tag in self.op.tags:
11531
      objects.TaggableObject.ValidateTag(tag)
11532

    
11533
  def Exec(self, feedback_fn):
11534
    """Sets the tag.
11535

11536
    """
11537
    try:
11538
      for tag in self.op.tags:
11539
        self.target.AddTag(tag)
11540
    except errors.TagError, err:
11541
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
11542
    self.cfg.Update(self.target, feedback_fn)
11543

    
11544

    
11545
class LUTagsDel(TagsLU):
11546
  """Delete a list of tags from a given object.
11547

11548
  """
11549
  REQ_BGL = False
11550

    
11551
  def CheckPrereq(self):
11552
    """Check prerequisites.
11553

11554
    This checks that we have the given tag.
11555

11556
    """
11557
    TagsLU.CheckPrereq(self)
11558
    for tag in self.op.tags:
11559
      objects.TaggableObject.ValidateTag(tag)
11560
    del_tags = frozenset(self.op.tags)
11561
    cur_tags = self.target.GetTags()
11562

    
11563
    diff_tags = del_tags - cur_tags
11564
    if diff_tags:
11565
      diff_names = ("'%s'" % i for i in sorted(diff_tags))
11566
      raise errors.OpPrereqError("Tag(s) %s not found" %
11567
                                 (utils.CommaJoin(diff_names), ),
11568
                                 errors.ECODE_NOENT)
11569

    
11570
  def Exec(self, feedback_fn):
11571
    """Remove the tag from the object.
11572

11573
    """
11574
    for tag in self.op.tags:
11575
      self.target.RemoveTag(tag)
11576
    self.cfg.Update(self.target, feedback_fn)
11577

    
11578

    
11579
class LUTestDelay(NoHooksLU):
11580
  """Sleep for a specified amount of time.
11581

11582
  This LU sleeps on the master and/or nodes for a specified amount of
11583
  time.
11584

11585
  """
11586
  REQ_BGL = False
11587

    
11588
  def ExpandNames(self):
11589
    """Expand names and set required locks.
11590

11591
    This expands the node list, if any.
11592

11593
    """
11594
    self.needed_locks = {}
11595
    if self.op.on_nodes:
11596
      # _GetWantedNodes can be used here, but is not always appropriate to use
11597
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
11598
      # more information.
11599
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
11600
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
11601

    
11602
  def _TestDelay(self):
11603
    """Do the actual sleep.
11604

11605
    """
11606
    if self.op.on_master:
11607
      if not utils.TestDelay(self.op.duration):
11608
        raise errors.OpExecError("Error during master delay test")
11609
    if self.op.on_nodes:
11610
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
11611
      for node, node_result in result.items():
11612
        node_result.Raise("Failure during rpc call to node %s" % node)
11613

    
11614
  def Exec(self, feedback_fn):
11615
    """Execute the test delay opcode, with the wanted repetitions.
11616

11617
    """
11618
    if self.op.repeat == 0:
11619
      self._TestDelay()
11620
    else:
11621
      top_value = self.op.repeat - 1
11622
      for i in range(self.op.repeat):
11623
        self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
11624
        self._TestDelay()
11625

    
11626

    
11627
class LUTestJqueue(NoHooksLU):
11628
  """Utility LU to test some aspects of the job queue.
11629

11630
  """
11631
  REQ_BGL = False
11632

    
11633
  # Must be lower than default timeout for WaitForJobChange to see whether it
11634
  # notices changed jobs
11635
  _CLIENT_CONNECT_TIMEOUT = 20.0
11636
  _CLIENT_CONFIRM_TIMEOUT = 60.0
11637

    
11638
  @classmethod
11639
  def _NotifyUsingSocket(cls, cb, errcls):
11640
    """Opens a Unix socket and waits for another program to connect.
11641

11642
    @type cb: callable
11643
    @param cb: Callback to send socket name to client
11644
    @type errcls: class
11645
    @param errcls: Exception class to use for errors
11646

11647
    """
11648
    # Using a temporary directory as there's no easy way to create temporary
11649
    # sockets without writing a custom loop around tempfile.mktemp and
11650
    # socket.bind
11651
    tmpdir = tempfile.mkdtemp()
11652
    try:
11653
      tmpsock = utils.PathJoin(tmpdir, "sock")
11654

    
11655
      logging.debug("Creating temporary socket at %s", tmpsock)
11656
      sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
11657
      try:
11658
        sock.bind(tmpsock)
11659
        sock.listen(1)
11660

    
11661
        # Send details to client
11662
        cb(tmpsock)
11663

    
11664
        # Wait for client to connect before continuing
11665
        sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
11666
        try:
11667
          (conn, _) = sock.accept()
11668
        except socket.error, err:
11669
          raise errcls("Client didn't connect in time (%s)" % err)
11670
      finally:
11671
        sock.close()
11672
    finally:
11673
      # Remove as soon as client is connected
11674
      shutil.rmtree(tmpdir)
11675

    
11676
    # Wait for client to close
11677
    try:
11678
      try:
11679
        # pylint: disable-msg=E1101
11680
        # Instance of '_socketobject' has no ... member
11681
        conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
11682
        conn.recv(1)
11683
      except socket.error, err:
11684
        raise errcls("Client failed to confirm notification (%s)" % err)
11685
    finally:
11686
      conn.close()
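  # Illustrative sketch (not part of the original module): a hypothetical
  # caller passes a callback receiving the socket path, e.g.
  #   cls._NotifyUsingSocket(lambda path: logging.info("Socket at %s", path),
  #                          errors.OpExecError)
  # the client then connects within _CLIENT_CONNECT_TIMEOUT and sends a
  # single byte to confirm the notification.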
11687

    
11688
  def _SendNotification(self, test, arg, sockname):
11689
    """Sends a notification to the client.
11690

11691
    @type test: string
11692
    @param test: Test name
11693
    @param arg: Test argument (depends on test)
11694
    @type sockname: string
11695
    @param sockname: Socket path
11696

11697
    """
11698
    self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))
11699

    
11700
  def _Notify(self, prereq, test, arg):
11701
    """Notifies the client of a test.
11702

11703
    @type prereq: bool
11704
    @param prereq: Whether this is a prereq-phase test
11705
    @type test: string
11706
    @param test: Test name
11707
    @param arg: Test argument (depends on test)
11708

11709
    """
11710
    if prereq:
11711
      errcls = errors.OpPrereqError
11712
    else:
11713
      errcls = errors.OpExecError
11714

    
11715
    return self._NotifyUsingSocket(compat.partial(self._SendNotification,
11716
                                                  test, arg),
11717
                                   errcls)
11718

    
11719
  def CheckArguments(self):
11720
    self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
11721
    self.expandnames_calls = 0
11722

    
11723
  def ExpandNames(self):
11724
    checkargs_calls = getattr(self, "checkargs_calls", 0)
11725
    if checkargs_calls < 1:
11726
      raise errors.ProgrammerError("CheckArguments was not called")
11727

    
11728
    self.expandnames_calls += 1
11729

    
11730
    if self.op.notify_waitlock:
11731
      self._Notify(True, constants.JQT_EXPANDNAMES, None)
11732

    
11733
    self.LogInfo("Expanding names")
11734

    
11735
    # Get lock on master node (just to get a lock, not for a particular reason)
11736
    self.needed_locks = {
11737
      locking.LEVEL_NODE: self.cfg.GetMasterNode(),
11738
      }
11739

    
11740
  def Exec(self, feedback_fn):
11741
    if self.expandnames_calls < 1:
11742
      raise errors.ProgrammerError("ExpandNames was not called")
11743

    
11744
    if self.op.notify_exec:
11745
      self._Notify(False, constants.JQT_EXEC, None)
11746

    
11747
    self.LogInfo("Executing")
11748

    
11749
    if self.op.log_messages:
11750
      self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
11751
      for idx, msg in enumerate(self.op.log_messages):
11752
        self.LogInfo("Sending log message %s", idx + 1)
11753
        feedback_fn(constants.JQT_MSGPREFIX + msg)
11754
        # Report how many test messages have been sent
11755
        self._Notify(False, constants.JQT_LOGMSG, idx + 1)
11756

    
11757
    if self.op.fail:
11758
      raise errors.OpExecError("Opcode failure was requested")
11759

    
11760
    return True
11761

    
11762

    
11763
class IAllocator(object):
11764
  """IAllocator framework.
11765

11766
  An IAllocator instance has four sets of attributes:
11767
    - cfg that is needed to query the cluster
11768
    - input data (all members of the _KEYS class attribute are required)
11769
    - four buffer attributes (in|out_data|text), that represent the
11770
      input (to the external script) in text and data structure format,
11771
      and the output from it, again in two formats
11772
    - the result variables from the script (success, info, nodes) for
11773
      easy usage
11774

11775
  """
11776
  # pylint: disable-msg=R0902
11777
  # lots of instance attributes
11778

    
11779
  def __init__(self, cfg, rpc, mode, **kwargs):
11780
    self.cfg = cfg
11781
    self.rpc = rpc
11782
    # init buffer variables
11783
    self.in_text = self.out_text = self.in_data = self.out_data = None
11784
    # init all input fields so that pylint is happy
11785
    self.mode = mode
11786
    self.mem_size = self.disks = self.disk_template = None
11787
    self.os = self.tags = self.nics = self.vcpus = None
11788
    self.hypervisor = None
11789
    self.relocate_from = None
11790
    self.name = None
11791
    self.evac_nodes = None
11792
    self.instances = None
11793
    self.reloc_mode = None
11794
    self.target_groups = None
11795
    # computed fields
11796
    self.required_nodes = None
11797
    # init result fields
11798
    self.success = self.info = self.result = None
11799

    
11800
    try:
11801
      (fn, keyset) = self._MODE_DATA[self.mode]
11802
    except KeyError:
11803
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
11804
                                   " IAllocator" % self.mode)
11805

    
11806
    for key in kwargs:
11807
      if key not in keyset:
11808
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
11809
                                     " IAllocator" % key)
11810
      setattr(self, key, kwargs[key])
11811

    
11812
    for key in keyset:
11813
      if key not in kwargs:
11814
        raise errors.ProgrammerError("Missing input parameter '%s' to"
11815
                                     " IAllocator" % key)
11816
    self._BuildInputData(compat.partial(fn, self))
11817

    
11818
  def _ComputeClusterData(self):
11819
    """Compute the generic allocator input data.
11820

11821
    This is the data that is independent of the actual operation.
11822

11823
    """
11824
    cfg = self.cfg
11825
    cluster_info = cfg.GetClusterInfo()
11826
    # cluster data
11827
    data = {
11828
      "version": constants.IALLOCATOR_VERSION,
11829
      "cluster_name": cfg.GetClusterName(),
11830
      "cluster_tags": list(cluster_info.GetTags()),
11831
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
11832
      # we don't have job IDs
11833
      }
11834
    ninfo = cfg.GetAllNodesInfo()
11835
    iinfo = cfg.GetAllInstancesInfo().values()
11836
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
11837

    
11838
    # node data
11839
    node_list = [n.name for n in ninfo.values() if n.vm_capable]
11840

    
11841
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
11842
      hypervisor_name = self.hypervisor
11843
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
11844
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
11845
    elif self.mode in (constants.IALLOCATOR_MODE_MEVAC,
11846
                       constants.IALLOCATOR_MODE_MRELOC):
11847
      hypervisor_name = cluster_info.enabled_hypervisors[0]
11848

    
11849
    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
11850
                                        hypervisor_name)
11851
    node_iinfo = \
11852
      self.rpc.call_all_instances_info(node_list,
11853
                                       cluster_info.enabled_hypervisors)
11854

    
11855
    data["nodegroups"] = self._ComputeNodeGroupData(cfg)
11856

    
11857
    config_ndata = self._ComputeBasicNodeData(ninfo)
11858
    data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
11859
                                                 i_list, config_ndata)
11860
    assert len(data["nodes"]) == len(ninfo), \
11861
        "Incomplete node data computed"
11862

    
11863
    data["instances"] = self._ComputeInstanceData(cluster_info, i_list)
11864

    
11865
    self.in_data = data
11866

    
11867
  @staticmethod
11868
  def _ComputeNodeGroupData(cfg):
11869
    """Compute node groups data.
11870

11871
    """
11872
    ng = dict((guuid, {
11873
      "name": gdata.name,
11874
      "alloc_policy": gdata.alloc_policy,
11875
      })
11876
      for guuid, gdata in cfg.GetAllNodeGroupsInfo().items())
11877

    
11878
    return ng
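  # Illustrative note (not in the original source): the resulting mapping is
  # keyed by group UUID, e.g.
  #   {"uuid-1": {"name": "default", "alloc_policy": "preferred"}}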
11879

    
11880
  @staticmethod
11881
  def _ComputeBasicNodeData(node_cfg):
11882
    """Compute global node data.
11883

11884
    @rtype: dict
11885
    @returns: a dict of name: (node dict, node config)
11886

11887
    """
11888
    # fill in static (config-based) values
11889
    node_results = dict((ninfo.name, {
11890
      "tags": list(ninfo.GetTags()),
11891
      "primary_ip": ninfo.primary_ip,
11892
      "secondary_ip": ninfo.secondary_ip,
11893
      "offline": ninfo.offline,
11894
      "drained": ninfo.drained,
11895
      "master_candidate": ninfo.master_candidate,
11896
      "group": ninfo.group,
11897
      "master_capable": ninfo.master_capable,
11898
      "vm_capable": ninfo.vm_capable,
11899
      })
11900
      for ninfo in node_cfg.values())
11901

    
11902
    return node_results
11903

    
11904
  @staticmethod
11905
  def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
11906
                              node_results):
11907
    """Compute global node data.
11908

11909
    @param node_results: the basic node structures as filled from the config
11910

11911
    """
11912
    # make a copy of the current dict
11913
    node_results = dict(node_results)
11914
    for nname, nresult in node_data.items():
11915
      assert nname in node_results, "Missing basic data for node %s" % nname
11916
      ninfo = node_cfg[nname]
11917

    
11918
      if not (ninfo.offline or ninfo.drained):
11919
        nresult.Raise("Can't get data for node %s" % nname)
11920
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
11921
                                nname)
11922
        remote_info = nresult.payload
11923

    
11924
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
11925
                     'vg_size', 'vg_free', 'cpu_total']:
11926
          if attr not in remote_info:
11927
            raise errors.OpExecError("Node '%s' didn't return attribute"
11928
                                     " '%s'" % (nname, attr))
11929
          if not isinstance(remote_info[attr], int):
11930
            raise errors.OpExecError("Node '%s' returned invalid value"
11931
                                     " for '%s': %s" %
11932
                                     (nname, attr, remote_info[attr]))
11933
        # compute memory used by primary instances
11934
        i_p_mem = i_p_up_mem = 0
11935
        for iinfo, beinfo in i_list:
11936
          if iinfo.primary_node == nname:
11937
            i_p_mem += beinfo[constants.BE_MEMORY]
11938
            if iinfo.name not in node_iinfo[nname].payload:
11939
              i_used_mem = 0
11940
            else:
11941
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
11942
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
11943
            remote_info['memory_free'] -= max(0, i_mem_diff)
11944

    
11945
            if iinfo.admin_up:
11946
              i_p_up_mem += beinfo[constants.BE_MEMORY]
11947

    
11948
        # compute memory used by instances
11949
        pnr_dyn = {
11950
          "total_memory": remote_info['memory_total'],
11951
          "reserved_memory": remote_info['memory_dom0'],
11952
          "free_memory": remote_info['memory_free'],
11953
          "total_disk": remote_info['vg_size'],
11954
          "free_disk": remote_info['vg_free'],
11955
          "total_cpus": remote_info['cpu_total'],
11956
          "i_pri_memory": i_p_mem,
11957
          "i_pri_up_memory": i_p_up_mem,
11958
          }
11959
        pnr_dyn.update(node_results[nname])
11960
        node_results[nname] = pnr_dyn
11961

    
11962
    return node_results
11963

    
11964
  @staticmethod
11965
  def _ComputeInstanceData(cluster_info, i_list):
11966
    """Compute global instance data.
11967

11968
    """
11969
    instance_data = {}
11970
    for iinfo, beinfo in i_list:
11971
      nic_data = []
11972
      for nic in iinfo.nics:
11973
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
11974
        nic_dict = {
11975
          "mac": nic.mac,
11976
          "ip": nic.ip,
11977
          "mode": filled_params[constants.NIC_MODE],
11978
          "link": filled_params[constants.NIC_LINK],
11979
          }
11980
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
11981
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
11982
        nic_data.append(nic_dict)
11983
      pir = {
11984
        "tags": list(iinfo.GetTags()),
11985
        "admin_up": iinfo.admin_up,
11986
        "vcpus": beinfo[constants.BE_VCPUS],
11987
        "memory": beinfo[constants.BE_MEMORY],
11988
        "os": iinfo.os,
11989
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
11990
        "nics": nic_data,
11991
        "disks": [{constants.IDISK_SIZE: dsk.size,
11992
                   constants.IDISK_MODE: dsk.mode}
11993
                  for dsk in iinfo.disks],
11994
        "disk_template": iinfo.disk_template,
11995
        "hypervisor": iinfo.hypervisor,
11996
        }
11997
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
11998
                                                 pir["disks"])
11999
      instance_data[iinfo.name] = pir
12000

    
12001
    return instance_data
12002

    
12003
  def _AddNewInstance(self):
12004
    """Add new instance data to allocator structure.
12005

12006
    This in combination with _AllocatorGetClusterData will create the
12007
    correct structure needed as input for the allocator.
12008

12009
    The checks for the completeness of the opcode must have already been
12010
    done.
12011

12012
    """
12013
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)
12014

    
12015
    if self.disk_template in constants.DTS_INT_MIRROR:
12016
      self.required_nodes = 2
12017
    else:
12018
      self.required_nodes = 1
12019

    
12020
    request = {
12021
      "name": self.name,
12022
      "disk_template": self.disk_template,
12023
      "tags": self.tags,
12024
      "os": self.os,
12025
      "vcpus": self.vcpus,
12026
      "memory": self.mem_size,
12027
      "disks": self.disks,
12028
      "disk_space_total": disk_space,
12029
      "nics": self.nics,
12030
      "required_nodes": self.required_nodes,
12031
      }
12032

    
12033
    return request
12034

    
12035
  def _AddRelocateInstance(self):
12036
    """Add relocate instance data to allocator structure.
12037

12038
    This in combination with _IAllocatorGetClusterData will create the
12039
    correct structure needed as input for the allocator.
12040

12041
    The checks for the completeness of the opcode must have already been
12042
    done.
12043

12044
    """
12045
    instance = self.cfg.GetInstanceInfo(self.name)
12046
    if instance is None:
12047
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
12048
                                   " IAllocator" % self.name)
12049

    
12050
    if instance.disk_template not in constants.DTS_MIRRORED:
12051
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
12052
                                 errors.ECODE_INVAL)
12053

    
12054
    if instance.disk_template in constants.DTS_INT_MIRROR and \
12055
        len(instance.secondary_nodes) != 1:
12056
      raise errors.OpPrereqError("Instance has not exactly one secondary node",
12057
                                 errors.ECODE_STATE)
12058

    
12059
    self.required_nodes = 1
12060
    disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks]
12061
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
12062

    
12063
    request = {
12064
      "name": self.name,
12065
      "disk_space_total": disk_space,
12066
      "required_nodes": self.required_nodes,
12067
      "relocate_from": self.relocate_from,
12068
      }
12069
    return request
12070

    
12071
  def _AddEvacuateNodes(self):
12072
    """Add evacuate nodes data to allocator structure.
12073

12074
    """
12075
    request = {
12076
      "evac_nodes": self.evac_nodes
12077
      }
12078
    return request
12079

    
12080
  def _AddMultiRelocate(self):
12081
    """Get data for multi-relocate requests.
12082

12083
    """
12084
    return {
12085
      "instances": self.instances,
12086
      "reloc_mode": self.reloc_mode,
12087
      "target_groups": self.target_groups,
12088
      }
12089

    
12090
  def _BuildInputData(self, fn):
12091
    """Build input data structures.
12092

12093
    """
12094
    self._ComputeClusterData()
12095

    
12096
    request = fn()
12097
    request["type"] = self.mode
12098
    self.in_data["request"] = request
12099

    
12100
    self.in_text = serializer.Dump(self.in_data)
12101

    
12102
  _MODE_DATA = {
12103
    constants.IALLOCATOR_MODE_ALLOC:
12104
      (_AddNewInstance,
12105
       ["name", "mem_size", "disks", "disk_template", "os", "tags", "nics",
12106
        "vcpus", "hypervisor"]),
12107
    constants.IALLOCATOR_MODE_RELOC:
12108
      (_AddRelocateInstance, ["name", "relocate_from"]),
12109
    constants.IALLOCATOR_MODE_MEVAC:
12110
      (_AddEvacuateNodes, ["evac_nodes"]),
12111
    constants.IALLOCATOR_MODE_MRELOC:
12112
      (_AddMultiRelocate, ["instances", "reloc_mode", "target_groups"]),
12113
    }
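  # Illustrative sketch (not part of the original module): _MODE_DATA maps
  # each mode to its request-building method and the keyword arguments that
  # __init__ requires, so a hypothetical relocation request would look like
  #   IAllocator(cfg, rpc, constants.IALLOCATOR_MODE_RELOC,
  #              name="inst1.example.com", relocate_from=["node2"])
  # missing or unexpected keywords raise ProgrammerError in __init__.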
12114

    
12115
  def Run(self, name, validate=True, call_fn=None):
12116
    """Run an instance allocator and return the results.
12117

12118
    """
12119
    if call_fn is None:
12120
      call_fn = self.rpc.call_iallocator_runner
12121

    
12122
    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
12123
    result.Raise("Failure while running the iallocator script")
12124

    
12125
    self.out_text = result.payload
12126
    if validate:
12127
      self._ValidateResult()
12128

    
12129
  def _ValidateResult(self):
12130
    """Process the allocator results.
12131

12132
    This will process and if successful save the result in
12133
    self.out_data and the other parameters.
12134

12135
    """
12136
    try:
12137
      rdict = serializer.Load(self.out_text)
12138
    except Exception, err:
12139
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
12140

    
12141
    if not isinstance(rdict, dict):
12142
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
12143

    
12144
    # TODO: remove backwards compatibility in later versions
12145
    if "nodes" in rdict and "result" not in rdict:
12146
      rdict["result"] = rdict["nodes"]
12147
      del rdict["nodes"]
12148

    
12149
    for key in "success", "info", "result":
12150
      if key not in rdict:
12151
        raise errors.OpExecError("Can't parse iallocator results:"
12152
                                 " missing key '%s'" % key)
12153
      setattr(self, key, rdict[key])
12154

    
12155
    if not isinstance(rdict["result"], list):
12156
      raise errors.OpExecError("Can't parse iallocator results: 'result' key"
12157
                               " is not a list")
12158

    
12159
    if self.mode == constants.IALLOCATOR_MODE_RELOC:
12160
      assert self.relocate_from is not None
12161
      assert self.required_nodes == 1
12162

    
12163
      node2group = dict((name, ndata["group"])
12164
                        for (name, ndata) in self.in_data["nodes"].items())
12165

    
12166
      fn = compat.partial(self._NodesToGroups, node2group,
12167
                          self.in_data["nodegroups"])
12168

    
12169
      request_groups = fn(self.relocate_from)
12170
      result_groups = fn(rdict["result"])
12171

    
12172
      if result_groups != request_groups:
12173
        raise errors.OpExecError("Groups of nodes returned by iallocator (%s)"
12174
                                 " differ from original groups (%s)" %
12175
                                 (utils.CommaJoin(result_groups),
12176
                                  utils.CommaJoin(request_groups)))
12177

    
12178
    self.out_data = rdict
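  # Illustrative note (not in the original source): a well-formed allocator
  # reply therefore deserializes to something like
  #   {"success": True, "info": "allocation successful", "result": ["node1"]}
  # with "result" always a list; older scripts returning "nodes" are still
  # accepted via the renaming above.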
12179

    
12180
  @staticmethod
12181
  def _NodesToGroups(node2group, groups, nodes):
12182
    """Returns a list of unique group names for a list of nodes.
12183

12184
    @type node2group: dict
12185
    @param node2group: Map from node name to group UUID
12186
    @type groups: dict
12187
    @param groups: Group information
12188
    @type nodes: list
12189
    @param nodes: Node names
12190

12191
    """
12192
    result = set()
12193

    
12194
    for node in nodes:
12195
      try:
12196
        group_uuid = node2group[node]
12197
      except KeyError:
12198
        # Ignore unknown node
12199
        pass
12200
      else:
12201
        try:
12202
          group = groups[group_uuid]
12203
        except KeyError:
12204
          # Can't find group, let's use UUID
12205
          group_name = group_uuid
12206
        else:
12207
          group_name = group["name"]
12208

    
12209
        result.add(group_name)
12210

    
12211
    return sorted(result)
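  # Illustrative note (not in the original source): given hypothetical data
  #   node2group = {"node1": "uuid-1", "node2": "uuid-2"}
  #   groups     = {"uuid-1": {"name": "default"}}
  # _NodesToGroups(node2group, groups, ["node1", "node2", "ghost"]) returns
  # ["default", "uuid-2"]: unknown nodes are skipped and unknown groups fall
  # back to their UUID.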


class LUTestAllocator(NoHooksLU):
12215
  """Run allocator tests.
12216

12217
  This LU runs the allocator tests
12218

12219
  """
12220
  def CheckPrereq(self):
12221
    """Check prerequisites.
12222

12223
    This checks the opcode parameters depending on the test direction and mode.
12224

12225
    """
12226
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
12227
      for attr in ["mem_size", "disks", "disk_template",
12228
                   "os", "tags", "nics", "vcpus"]:
12229
        if not hasattr(self.op, attr):
12230
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
12231
                                     attr, errors.ECODE_INVAL)
12232
      iname = self.cfg.ExpandInstanceName(self.op.name)
12233
      if iname is not None:
12234
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
12235
                                   iname, errors.ECODE_EXISTS)
12236
      if not isinstance(self.op.nics, list):
12237
        raise errors.OpPrereqError("Invalid parameter 'nics'",
12238
                                   errors.ECODE_INVAL)
12239
      if not isinstance(self.op.disks, list):
12240
        raise errors.OpPrereqError("Invalid parameter 'disks'",
12241
                                   errors.ECODE_INVAL)
12242
      for row in self.op.disks:
12243
        if (not isinstance(row, dict) or
12244
            "size" not in row or
12245
            not isinstance(row["size"], int) or
12246
            "mode" not in row or
12247
            row["mode"] not in ['r', 'w']):
12248
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
12249
                                     " parameter", errors.ECODE_INVAL)
12250
      if self.op.hypervisor is None:
12251
        self.op.hypervisor = self.cfg.GetHypervisorType()
12252
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
12253
      fname = _ExpandInstanceName(self.cfg, self.op.name)
12254
      self.op.name = fname
12255
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
12256
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
12257
      if not hasattr(self.op, "evac_nodes"):
12258
        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
12259
                                   " opcode input", errors.ECODE_INVAL)
12260
    else:
12261
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
12262
                                 self.op.mode, errors.ECODE_INVAL)
12263

    
12264
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
12265
      if self.op.allocator is None:
12266
        raise errors.OpPrereqError("Missing allocator name",
12267
                                   errors.ECODE_INVAL)
12268
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
12269
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
12270
                                 self.op.direction, errors.ECODE_INVAL)
12271

    
12272
  def Exec(self, feedback_fn):
12273
    """Run the allocator test.
12274

12275
    """
12276
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
12277
      ial = IAllocator(self.cfg, self.rpc,
12278
                       mode=self.op.mode,
12279
                       name=self.op.name,
12280
                       mem_size=self.op.mem_size,
12281
                       disks=self.op.disks,
12282
                       disk_template=self.op.disk_template,
12283
                       os=self.op.os,
12284
                       tags=self.op.tags,
12285
                       nics=self.op.nics,
12286
                       vcpus=self.op.vcpus,
12287
                       hypervisor=self.op.hypervisor,
12288
                       )
12289
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
12290
      ial = IAllocator(self.cfg, self.rpc,
12291
                       mode=self.op.mode,
12292
                       name=self.op.name,
12293
                       relocate_from=list(self.relocate_from),
12294
                       )
12295
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
12296
      ial = IAllocator(self.cfg, self.rpc,
12297
                       mode=self.op.mode,
12298
                       evac_nodes=self.op.evac_nodes)
12299
    else:
12300
      raise errors.ProgrammerError("Uncatched mode %s in"
12301
                                   " LUTestAllocator.Exec", self.op.mode)
12302

    
12303
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
12304
      result = ial.in_text
12305
    else:
12306
      ial.Run(self.op.allocator, validate=False)
12307
      result = ial.out_text
12308
    return result
12309

    
12310

    
12311
#: Query type implementations
12312
_QUERY_IMPL = {
12313
  constants.QR_INSTANCE: _InstanceQuery,
12314
  constants.QR_NODE: _NodeQuery,
12315
  constants.QR_GROUP: _GroupQuery,
12316
  constants.QR_OS: _OsQuery,
12317
  }
12318

    
12319
assert set(_QUERY_IMPL.keys()) == constants.QR_VIA_OP
12320

    
12321

    
12322
def _GetQueryImplementation(name):
12323
  """Returns the implemtnation for a query type.
12324

12325
  @param name: Query type, must be one of L{constants.QR_VIA_OP}
12326

12327
  """
12328
  try:
12329
    return _QUERY_IMPL[name]
12330
  except KeyError:
12331
    raise errors.OpPrereqError("Unknown query resource '%s'" % name,
12332
                               errors.ECODE_INVAL)