lib/cmdlib.py @ db8e5f1c

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module implementing the master-side code."""
23

    
24
# pylint: disable-msg=W0201,C0302
25

    
26
# W0201 since most LU attributes are defined in CheckPrereq or similar
27
# functions
28

    
29
# C0302: since we have way too many lines in this module
30

    
31
import os
32
import os.path
33
import time
34
import re
35
import platform
36
import logging
37
import copy
38
import OpenSSL
39
import socket
40
import tempfile
41
import shutil
42
import itertools
43

    
44
from ganeti import ssh
45
from ganeti import utils
46
from ganeti import errors
47
from ganeti import hypervisor
48
from ganeti import locking
49
from ganeti import constants
50
from ganeti import objects
51
from ganeti import serializer
52
from ganeti import ssconf
53
from ganeti import uidpool
54
from ganeti import compat
55
from ganeti import masterd
56
from ganeti import netutils
57
from ganeti import query
58
from ganeti import qlang
59
from ganeti import opcodes
60

    
61
import ganeti.masterd.instance # pylint: disable-msg=W0611
62

    
63

    
64
def _SupportsOob(cfg, node):
65
  """Tells if node supports OOB.
66

67
  @type cfg: L{config.ConfigWriter}
68
  @param cfg: The cluster configuration
69
  @type node: L{objects.Node}
70
  @param node: The node
71
  @return: The OOB script if supported or an empty string otherwise
72

73
  """
74
  return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
75

    
76

    
77
# End types
78
class LogicalUnit(object):
79
  """Logical Unit base class.
80

81
  Subclasses must follow these rules:
82
    - implement ExpandNames
83
    - implement CheckPrereq (except when tasklets are used)
84
    - implement Exec (except when tasklets are used)
85
    - implement BuildHooksEnv
86
    - redefine HPATH and HTYPE
87
    - optionally redefine their run requirements:
88
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
89

90
  Note that all commands require root permissions.
91

92
  @ivar dry_run_result: the value (if any) that will be returned to the caller
93
      in dry-run mode (signalled by opcode dry_run parameter)
94

95
  """
96
  HPATH = None
97
  HTYPE = None
98
  REQ_BGL = True
99

    
100
  def __init__(self, processor, op, context, rpc):
101
    """Constructor for LogicalUnit.
102

103
    This needs to be overridden in derived classes in order to check op
104
    validity.
105

106
    """
107
    self.proc = processor
108
    self.op = op
109
    self.cfg = context.cfg
110
    self.context = context
111
    self.rpc = rpc
112
    # Dicts used to declare locking needs to mcpu
113
    self.needed_locks = None
114
    self.acquired_locks = {}
115
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
116
    self.add_locks = {}
117
    self.remove_locks = {}
118
    # Used to force good behavior when calling helper functions
119
    self.recalculate_locks = {}
120
    self.__ssh = None
121
    # logging
122
    self.Log = processor.Log # pylint: disable-msg=C0103
123
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
124
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
125
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
126
    # support for dry-run
127
    self.dry_run_result = None
128
    # support for generic debug attribute
129
    if (not hasattr(self.op, "debug_level") or
130
        not isinstance(self.op.debug_level, int)):
131
      self.op.debug_level = 0
132

    
133
    # Tasklets
134
    self.tasklets = None
135

    
136
    # Validate opcode parameters and set defaults
137
    self.op.Validate(True)
138

    
139
    self.CheckArguments()
140

    
141
  def __GetSSH(self):
142
    """Returns the SshRunner object
143

144
    """
145
    if not self.__ssh:
146
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
147
    return self.__ssh
148

    
149
  ssh = property(fget=__GetSSH)
150

    
151
  def CheckArguments(self):
152
    """Check syntactic validity for the opcode arguments.
153

154
    This method is for doing a simple syntactic check and ensuring the
155
    validity of opcode parameters, without any cluster-related
156
    checks. While the same can be accomplished in ExpandNames and/or
157
    CheckPrereq, doing these separately is better because:
158

159
      - ExpandNames is left purely as a lock-related function
160
      - CheckPrereq is run after we have acquired locks (and possibly
161
        waited for them)
162

163
    The function is allowed to change the self.op attribute so that
164
    later methods no longer need to worry about missing parameters.
165

166
    """
167
    pass
168

    
169
  def ExpandNames(self):
170
    """Expand names for this LU.
171

172
    This method is called before starting to execute the opcode, and it should
173
    update all the parameters of the opcode to their canonical form (e.g. a
174
    short node name must be fully expanded after this method has successfully
175
    completed). This way locking, hooks, logging, etc. can work correctly.
176

177
    LUs which implement this method must also populate the self.needed_locks
178
    member, as a dict with lock levels as keys, and a list of needed lock names
179
    as values. Rules:
180

181
      - use an empty dict if you don't need any lock
182
      - if you don't need any lock at a particular level omit that level
183
      - don't put anything for the BGL level
184
      - if you want all locks at a level use locking.ALL_SET as a value
185

186
    If you need to share locks (rather than acquire them exclusively) at one
187
    level you can modify self.share_locks, setting a true value (usually 1) for
188
    that level. By default locks are not shared.
189

190
    This function can also define a list of tasklets, which then will be
191
    executed in order instead of the usual LU-level CheckPrereq and Exec
192
    functions, if those are not defined by the LU.
193

194
    Examples::
195

196
      # Acquire all nodes and one instance
197
      self.needed_locks = {
198
        locking.LEVEL_NODE: locking.ALL_SET,
199
        locking.LEVEL_INSTANCE: ['instance1.example.com'],
200
      }
201
      # Acquire just two nodes
202
      self.needed_locks = {
203
        locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
204
      }
205
      # Acquire no locks
206
      self.needed_locks = {} # No, you can't leave it to the default value None
207

208
    """
209
    # The implementation of this method is mandatory only if the new LU is
210
    # concurrent, so that old LUs don't need to be changed all at the same
211
    # time.
212
    if self.REQ_BGL:
213
      self.needed_locks = {} # Exclusive LUs don't need locks.
214
    else:
215
      raise NotImplementedError
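  # Illustrative sketch (not part of the original code): a hypothetical
  # concurrent LU overriding ExpandNames could combine needed_locks with
  # shared node locks roughly like this:
  #
  #   def ExpandNames(self):
  #     self.needed_locks = {
  #       locking.LEVEL_NODE: locking.ALL_SET,
  #       locking.LEVEL_INSTANCE: ["instance1.example.com"],
  #     }
  #     # share the node locks instead of acquiring them exclusively
  #     self.share_locks[locking.LEVEL_NODE] = 1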
216

    
217
  def DeclareLocks(self, level):
218
    """Declare LU locking needs for a level
219

220
    While most LUs can just declare their locking needs at ExpandNames time,
221
    sometimes there's the need to calculate some locks after having acquired
222
    the ones before. This function is called just before acquiring locks at a
223
    particular level, but after acquiring the ones at lower levels, and permits
224
    such calculations. It can be used to modify self.needed_locks, and by
225
    default it does nothing.
226

227
    This function is only called if you have something already set in
228
    self.needed_locks for the level.
229

230
    @param level: Locking level which is going to be locked
231
    @type level: member of ganeti.locking.LEVELS
232

233
    """
234

    
235
  def CheckPrereq(self):
236
    """Check prerequisites for this LU.
237

238
    This method should check that the prerequisites for the execution
239
    of this LU are fulfilled. It can do internode communication, but
240
    it should be idempotent - no cluster or system changes are
241
    allowed.
242

243
    The method should raise errors.OpPrereqError in case something is
244
    not fulfilled. Its return value is ignored.
245

246
    This method should also update all the parameters of the opcode to
247
    their canonical form if it hasn't been done by ExpandNames before.
248

249
    """
250
    if self.tasklets is not None:
251
      for (idx, tl) in enumerate(self.tasklets):
252
        logging.debug("Checking prerequisites for tasklet %s/%s",
253
                      idx + 1, len(self.tasklets))
254
        tl.CheckPrereq()
255
    else:
256
      pass
257

    
258
  def Exec(self, feedback_fn):
259
    """Execute the LU.
260

261
    This method should implement the actual work. It should raise
262
    errors.OpExecError for failures that are somewhat dealt with in
263
    code, or expected.
264

265
    """
266
    if self.tasklets is not None:
267
      for (idx, tl) in enumerate(self.tasklets):
268
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
269
        tl.Exec(feedback_fn)
270
    else:
271
      raise NotImplementedError
272

    
273
  def BuildHooksEnv(self):
274
    """Build hooks environment for this LU.
275

276
    This method should return a three-element tuple consisting of: a dict
277
    containing the environment that will be used for running the
278
    specific hook for this LU, a list of node names on which the hook
279
    should run before the execution, and a list of node names on which
280
    the hook should run after the execution.
281

282
    The keys of the dict must not be prefixed with 'GANETI_', as this will
283
    be handled in the hooks runner. Also note additional keys will be
284
    added by the hooks runner. If the LU doesn't define any
285
    environment, an empty dict (and not None) should be returned.
286

287
    If there are no nodes to return, use an empty list (and not None).
288

289
    Note that if the HPATH for a LU class is None, this function will
290
    not be called.
291

292
    """
293
    raise NotImplementedError
294

    
295
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
296
    """Notify the LU about the results of its hooks.
297

298
    This method is called every time a hooks phase is executed, and notifies
299
    the Logical Unit about the hooks' result. The LU can then use it to alter
300
    its result based on the hooks.  By default the method does nothing and the
301
    previous result is passed back unchanged, but any LU can override it if it
302
    wants to use the local cluster hook-scripts somehow.
303

304
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
305
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
306
    @param hook_results: the results of the multi-node hooks rpc call
307
    @param feedback_fn: function used to send feedback back to the caller
308
    @param lu_result: the previous Exec result this LU had, or None
309
        in the PRE phase
310
    @return: the new Exec result, based on the previous result
311
        and hook results
312

313
    """
314
    # API must be kept, thus we ignore the unused-argument and
315
    # could-be-a-function warnings
316
    # pylint: disable-msg=W0613,R0201
317
    return lu_result
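  # Illustrative sketch (an assumption, not from the original code): an LU
  # wanting to react to post-hook results could override HooksCallBack along
  # these lines, assuming each per-node hook result exposes a fail_msg-style
  # error attribute:
  #
  #   def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
  #     if phase == constants.HOOKS_PHASE_POST:
  #       for node, res in hook_results.items():
  #         if res.fail_msg:
  #           feedback_fn("Post-hook failed on %s: %s" % (node, res.fail_msg))
  #     return lu_result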
318

    
319
  def _ExpandAndLockInstance(self):
320
    """Helper function to expand and lock an instance.
321

322
    Many LUs that work on an instance take its name in self.op.instance_name
323
    and need to expand it and then declare the expanded name for locking. This
324
    function does it, and then updates self.op.instance_name to the expanded
325
    name. It also initializes needed_locks as a dict, if this hasn't been done
326
    before.
327

328
    """
329
    if self.needed_locks is None:
330
      self.needed_locks = {}
331
    else:
332
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
333
        "_ExpandAndLockInstance called with instance-level locks set"
334
    self.op.instance_name = _ExpandInstanceName(self.cfg,
335
                                                self.op.instance_name)
336
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
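  # Illustrative usage (not from the original code): a typical instance-level
  # LU would call the helper above from its ExpandNames, e.g.:
  #
  #   def ExpandNames(self):
  #     self._ExpandAndLockInstance()
  #
  # after which self.op.instance_name holds the fully-expanded name and
  # self.needed_locks[locking.LEVEL_INSTANCE] requests that instance's lock.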
337

    
338
  def _LockInstancesNodes(self, primary_only=False):
339
    """Helper function to declare instances' nodes for locking.
340

341
    This function should be called after locking one or more instances to lock
342
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
343
    with all primary or secondary nodes for instances already locked and
344
    present in self.needed_locks[locking.LEVEL_INSTANCE].
345

346
    It should be called from DeclareLocks, and for safety only works if
347
    self.recalculate_locks[locking.LEVEL_NODE] is set.
348

349
    In the future it may grow parameters to just lock some instance's nodes, or
350
    to just lock primary or secondary nodes, if needed.
351

352
    It should be called in DeclareLocks in a way similar to::
353

354
      if level == locking.LEVEL_NODE:
355
        self._LockInstancesNodes()
356

357
    @type primary_only: boolean
358
    @param primary_only: only lock primary nodes of locked instances
359

360
    """
361
    assert locking.LEVEL_NODE in self.recalculate_locks, \
362
      "_LockInstancesNodes helper function called with no nodes to recalculate"
363

    
364
    # TODO: check if we've really been called with the instance locks held
365

    
366
    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
367
    # future we might want to have different behaviors depending on the value
368
    # of self.recalculate_locks[locking.LEVEL_NODE]
369
    wanted_nodes = []
370
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
371
      instance = self.context.cfg.GetInstanceInfo(instance_name)
372
      wanted_nodes.append(instance.primary_node)
373
      if not primary_only:
374
        wanted_nodes.extend(instance.secondary_nodes)
375

    
376
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
377
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
378
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
379
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
380

    
381
    del self.recalculate_locks[locking.LEVEL_NODE]
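  # Illustrative sketch of the usual wiring for this helper (not part of the
  # original code): an instance LU defers its node locks to DeclareLocks:
  #
  #   def ExpandNames(self):
  #     self._ExpandAndLockInstance()
  #     self.needed_locks[locking.LEVEL_NODE] = []
  #     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
  #
  #   def DeclareLocks(self, level):
  #     if level == locking.LEVEL_NODE:
  #       self._LockInstancesNodes()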
382

    
383

    
384
class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
385
  """Simple LU which runs no hooks.
386

387
  This LU is intended as a parent for other LogicalUnits which will
388
  run no hooks, in order to reduce duplicate code.
389

390
  """
391
  HPATH = None
392
  HTYPE = None
393

    
394
  def BuildHooksEnv(self):
395
    """Empty BuildHooksEnv for NoHooksLu.
396

397
    This just raises an error.
398

399
    """
400
    assert False, "BuildHooksEnv called for NoHooksLUs"
401

    
402

    
403
class Tasklet:
404
  """Tasklet base class.
405

406
  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
407
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
408
  tasklets know nothing about locks.
409

410
  Subclasses must follow these rules:
411
    - Implement CheckPrereq
412
    - Implement Exec
413

414
  """
415
  def __init__(self, lu):
416
    self.lu = lu
417

    
418
    # Shortcuts
419
    self.cfg = lu.cfg
420
    self.rpc = lu.rpc
421

    
422
  def CheckPrereq(self):
423
    """Check prerequisites for this tasklets.
424

425
    This method should check whether the prerequisites for the execution of
426
    this tasklet are fulfilled. It can do internode communication, but it
427
    should be idempotent - no cluster or system changes are allowed.
428

429
    The method should raise errors.OpPrereqError in case something is not
430
    fulfilled. Its return value is ignored.
431

432
    This method should also update all parameters to their canonical form if it
433
    hasn't been done before.
434

435
    """
436
    pass
437

    
438
  def Exec(self, feedback_fn):
439
    """Execute the tasklet.
440

441
    This method should implement the actual work. It should raise
442
    errors.OpExecError for failures that are somewhat dealt with in code, or
443
    expected.
444

445
    """
446
    raise NotImplementedError
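# Illustrative sketch (not part of the original code): a minimal tasklet only
# implements CheckPrereq and Exec; the owning LU keeps all locking and would
# register it via something like self.tasklets = [_ExampleNoopTasklet(self)]
# in its ExpandNames. The class name here is hypothetical:
#
#   class _ExampleNoopTasklet(Tasklet):
#     def CheckPrereq(self):
#       pass  # nothing to verify for this no-op example
#
#     def Exec(self, feedback_fn):
#       feedback_fn("No-op tasklet executed")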
447

    
448

    
449
class _QueryBase:
450
  """Base for query utility classes.
451

452
  """
453
  #: Attribute holding field definitions
454
  FIELDS = None
455

    
456
  def __init__(self, names, fields, use_locking):
457
    """Initializes this class.
458

459
    """
460
    self.names = names
461
    self.use_locking = use_locking
462

    
463
    self.query = query.Query(self.FIELDS, fields)
464
    self.requested_data = self.query.RequestedData()
465

    
466
    self.do_locking = None
467
    self.wanted = None
468

    
469
  def _GetNames(self, lu, all_names, lock_level):
470
    """Helper function to determine names asked for in the query.
471

472
    """
473
    if self.do_locking:
474
      names = lu.acquired_locks[lock_level]
475
    else:
476
      names = all_names
477

    
478
    if self.wanted == locking.ALL_SET:
479
      assert not self.names
480
      # caller didn't specify names, so ordering is not important
481
      return utils.NiceSort(names)
482

    
483
    # caller specified names and we must keep the same order
484
    assert self.names
485
    assert not self.do_locking or lu.acquired_locks[lock_level]
486

    
487
    missing = set(self.wanted).difference(names)
488
    if missing:
489
      raise errors.OpExecError("Some items were removed before retrieving"
490
                               " their data: %s" % missing)
491

    
492
    # Return expanded names
493
    return self.wanted
494

    
495
  @classmethod
496
  def FieldsQuery(cls, fields):
497
    """Returns list of available fields.
498

499
    @return: List of L{objects.QueryFieldDefinition}
500

501
    """
502
    return query.QueryFields(cls.FIELDS, fields)
503

    
504
  def ExpandNames(self, lu):
505
    """Expand names for this query.
506

507
    See L{LogicalUnit.ExpandNames}.
508

509
    """
510
    raise NotImplementedError()
511

    
512
  def DeclareLocks(self, lu, level):
513
    """Declare locks for this query.
514

515
    See L{LogicalUnit.DeclareLocks}.
516

517
    """
518
    raise NotImplementedError()
519

    
520
  def _GetQueryData(self, lu):
521
    """Collects all data for this query.
522

523
    @return: Query data object
524

525
    """
526
    raise NotImplementedError()
527

    
528
  def NewStyleQuery(self, lu):
529
    """Collect data and execute query.
530

531
    """
532
    return query.GetQueryResponse(self.query, self._GetQueryData(lu))
533

    
534
  def OldStyleQuery(self, lu):
535
    """Collect data and execute query.
536

537
    """
538
    return self.query.OldStyleQuery(self._GetQueryData(lu))
539

    
540

    
541
def _GetWantedNodes(lu, nodes):
542
  """Returns list of checked and expanded node names.
543

544
  @type lu: L{LogicalUnit}
545
  @param lu: the logical unit on whose behalf we execute
546
  @type nodes: list
547
  @param nodes: list of node names or None for all nodes
548
  @rtype: list
549
  @return: the list of nodes, sorted
550
  @raise errors.ProgrammerError: if the nodes parameter is wrong type
551

552
  """
553
  if nodes:
554
    return [_ExpandNodeName(lu.cfg, name) for name in nodes]
555

    
556
  return utils.NiceSort(lu.cfg.GetNodeList())
557

    
558

    
559
def _GetWantedInstances(lu, instances):
560
  """Returns list of checked and expanded instance names.
561

562
  @type lu: L{LogicalUnit}
563
  @param lu: the logical unit on whose behalf we execute
564
  @type instances: list
565
  @param instances: list of instance names or None for all instances
566
  @rtype: list
567
  @return: the list of instances, sorted
568
  @raise errors.OpPrereqError: if the instances parameter is wrong type
569
  @raise errors.OpPrereqError: if any of the passed instances is not found
570

571
  """
572
  if instances:
573
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
574
  else:
575
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
576
  return wanted
577

    
578

    
579
def _GetUpdatedParams(old_params, update_dict,
580
                      use_default=True, use_none=False):
581
  """Return the new version of a parameter dictionary.
582

583
  @type old_params: dict
584
  @param old_params: old parameters
585
  @type update_dict: dict
586
  @param update_dict: dict containing new parameter values, or
587
      constants.VALUE_DEFAULT to reset the parameter to its default
588
      value
589
  @type use_default: boolean
590
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
591
      values as 'to be deleted' values
592
  @type use_none: boolean
593
  @param use_none: whether to recognise C{None} values as 'to be
594
      deleted' values
595
  @rtype: dict
596
  @return: the new parameter dictionary
597

598
  """
599
  params_copy = copy.deepcopy(old_params)
600
  for key, val in update_dict.iteritems():
601
    if ((use_default and val == constants.VALUE_DEFAULT) or
602
        (use_none and val is None)):
603
      try:
604
        del params_copy[key]
605
      except KeyError:
606
        pass
607
    else:
608
      params_copy[key] = val
609
  return params_copy
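# Worked example for _GetUpdatedParams (illustrative, made-up values):
#
#   old = {"kernel_path": "/boot/vmlinuz", "serial_console": True}
#   upd = {"serial_console": constants.VALUE_DEFAULT, "root_path": "/dev/vda1"}
#   _GetUpdatedParams(old, upd)
#   # -> {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/vda1"}
#
# "serial_console" is dropped (reset to its default) and "root_path" is added;
# with use_none=True an explicit None value would likewise delete a key.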
610

    
611

    
612
def _CheckOutputFields(static, dynamic, selected):
613
  """Checks whether all selected fields are valid.
614

615
  @type static: L{utils.FieldSet}
616
  @param static: static fields set
617
  @type dynamic: L{utils.FieldSet}
618
  @param dynamic: dynamic fields set
619

620
  """
621
  f = utils.FieldSet()
622
  f.Extend(static)
623
  f.Extend(dynamic)
624

    
625
  delta = f.NonMatching(selected)
626
  if delta:
627
    raise errors.OpPrereqError("Unknown output fields selected: %s"
628
                               % ",".join(delta), errors.ECODE_INVAL)
629

    
630

    
631
def _CheckGlobalHvParams(params):
632
  """Validates that given hypervisor params are not global ones.
633

634
  This will ensure that instances don't get customised versions of
635
  global params.
636

637
  """
638
  used_globals = constants.HVC_GLOBALS.intersection(params)
639
  if used_globals:
640
    msg = ("The following hypervisor parameters are global and cannot"
641
           " be customized at instance level, please modify them at"
642
           " cluster level: %s" % utils.CommaJoin(used_globals))
643
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
644

    
645

    
646
def _CheckNodeOnline(lu, node, msg=None):
647
  """Ensure that a given node is online.
648

649
  @param lu: the LU on behalf of which we make the check
650
  @param node: the node to check
651
  @param msg: if passed, should be a message to replace the default one
652
  @raise errors.OpPrereqError: if the node is offline
653

654
  """
655
  if msg is None:
656
    msg = "Can't use offline node"
657
  if lu.cfg.GetNodeInfo(node).offline:
658
    raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)
659

    
660

    
661
def _CheckNodeNotDrained(lu, node):
662
  """Ensure that a given node is not drained.
663

664
  @param lu: the LU on behalf of which we make the check
665
  @param node: the node to check
666
  @raise errors.OpPrereqError: if the node is drained
667

668
  """
669
  if lu.cfg.GetNodeInfo(node).drained:
670
    raise errors.OpPrereqError("Can't use drained node %s" % node,
671
                               errors.ECODE_STATE)
672

    
673

    
674
def _CheckNodeVmCapable(lu, node):
675
  """Ensure that a given node is vm capable.
676

677
  @param lu: the LU on behalf of which we make the check
678
  @param node: the node to check
679
  @raise errors.OpPrereqError: if the node is not vm capable
680

681
  """
682
  if not lu.cfg.GetNodeInfo(node).vm_capable:
683
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
684
                               errors.ECODE_STATE)
685

    
686

    
687
def _CheckNodeHasOS(lu, node, os_name, force_variant):
688
  """Ensure that a node supports a given OS.
689

690
  @param lu: the LU on behalf of which we make the check
691
  @param node: the node to check
692
  @param os_name: the OS to query about
693
  @param force_variant: whether to ignore variant errors
694
  @raise errors.OpPrereqError: if the node is not supporting the OS
695

696
  """
697
  result = lu.rpc.call_os_get(node, os_name)
698
  result.Raise("OS '%s' not in supported OS list for node %s" %
699
               (os_name, node),
700
               prereq=True, ecode=errors.ECODE_INVAL)
701
  if not force_variant:
702
    _CheckOSVariant(result.payload, os_name)
703

    
704

    
705
def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
706
  """Ensure that a node has the given secondary ip.
707

708
  @type lu: L{LogicalUnit}
709
  @param lu: the LU on behalf of which we make the check
710
  @type node: string
711
  @param node: the node to check
712
  @type secondary_ip: string
713
  @param secondary_ip: the ip to check
714
  @type prereq: boolean
715
  @param prereq: whether to throw a prerequisite or an execute error
716
  @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
717
  @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False
718

719
  """
720
  result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
721
  result.Raise("Failure checking secondary ip on node %s" % node,
722
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
723
  if not result.payload:
724
    msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
725
           " please fix and re-run this command" % secondary_ip)
726
    if prereq:
727
      raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
728
    else:
729
      raise errors.OpExecError(msg)
730

    
731

    
732
def _GetClusterDomainSecret():
733
  """Reads the cluster domain secret.
734

735
  """
736
  return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
737
                               strict=True)
738

    
739

    
740
def _CheckInstanceDown(lu, instance, reason):
741
  """Ensure that an instance is not running."""
742
  if instance.admin_up:
743
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
744
                               (instance.name, reason), errors.ECODE_STATE)
745

    
746
  pnode = instance.primary_node
747
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
748
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
749
              prereq=True, ecode=errors.ECODE_ENVIRON)
750

    
751
  if instance.name in ins_l.payload:
752
    raise errors.OpPrereqError("Instance %s is running, %s" %
753
                               (instance.name, reason), errors.ECODE_STATE)
754

    
755

    
756
def _ExpandItemName(fn, name, kind):
757
  """Expand an item name.
758

759
  @param fn: the function to use for expansion
760
  @param name: requested item name
761
  @param kind: text description ('Node' or 'Instance')
762
  @return: the resolved (full) name
763
  @raise errors.OpPrereqError: if the item is not found
764

765
  """
766
  full_name = fn(name)
767
  if full_name is None:
768
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
769
                               errors.ECODE_NOENT)
770
  return full_name
771

    
772

    
773
def _ExpandNodeName(cfg, name):
774
  """Wrapper over L{_ExpandItemName} for nodes."""
775
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
776

    
777

    
778
def _ExpandInstanceName(cfg, name):
779
  """Wrapper over L{_ExpandItemName} for instance."""
780
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
781

    
782

    
783
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
784
                          memory, vcpus, nics, disk_template, disks,
785
                          bep, hvp, hypervisor_name):
786
  """Builds instance related env variables for hooks
787

788
  This builds the hook environment from individual variables.
789

790
  @type name: string
791
  @param name: the name of the instance
792
  @type primary_node: string
793
  @param primary_node: the name of the instance's primary node
794
  @type secondary_nodes: list
795
  @param secondary_nodes: list of secondary nodes as strings
796
  @type os_type: string
797
  @param os_type: the name of the instance's OS
798
  @type status: boolean
799
  @param status: the should_run status of the instance
800
  @type memory: string
801
  @param memory: the memory size of the instance
802
  @type vcpus: string
803
  @param vcpus: the count of VCPUs the instance has
804
  @type nics: list
805
  @param nics: list of tuples (ip, mac, mode, link) representing
806
      the NICs the instance has
807
  @type disk_template: string
808
  @param disk_template: the disk template of the instance
809
  @type disks: list
810
  @param disks: the list of (size, mode) pairs
811
  @type bep: dict
812
  @param bep: the backend parameters for the instance
813
  @type hvp: dict
814
  @param hvp: the hypervisor parameters for the instance
815
  @type hypervisor_name: string
816
  @param hypervisor_name: the hypervisor for the instance
817
  @rtype: dict
818
  @return: the hook environment for this instance
819

820
  """
821
  if status:
822
    str_status = "up"
823
  else:
824
    str_status = "down"
825
  env = {
826
    "OP_TARGET": name,
827
    "INSTANCE_NAME": name,
828
    "INSTANCE_PRIMARY": primary_node,
829
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
830
    "INSTANCE_OS_TYPE": os_type,
831
    "INSTANCE_STATUS": str_status,
832
    "INSTANCE_MEMORY": memory,
833
    "INSTANCE_VCPUS": vcpus,
834
    "INSTANCE_DISK_TEMPLATE": disk_template,
835
    "INSTANCE_HYPERVISOR": hypervisor_name,
836
  }
837

    
838
  if nics:
839
    nic_count = len(nics)
840
    for idx, (ip, mac, mode, link) in enumerate(nics):
841
      if ip is None:
842
        ip = ""
843
      env["INSTANCE_NIC%d_IP" % idx] = ip
844
      env["INSTANCE_NIC%d_MAC" % idx] = mac
845
      env["INSTANCE_NIC%d_MODE" % idx] = mode
846
      env["INSTANCE_NIC%d_LINK" % idx] = link
847
      if mode == constants.NIC_MODE_BRIDGED:
848
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
849
  else:
850
    nic_count = 0
851

    
852
  env["INSTANCE_NIC_COUNT"] = nic_count
853

    
854
  if disks:
855
    disk_count = len(disks)
856
    for idx, (size, mode) in enumerate(disks):
857
      env["INSTANCE_DISK%d_SIZE" % idx] = size
858
      env["INSTANCE_DISK%d_MODE" % idx] = mode
859
  else:
860
    disk_count = 0
861

    
862
  env["INSTANCE_DISK_COUNT"] = disk_count
863

    
864
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
865
    for key, value in source.items():
866
      env["INSTANCE_%s_%s" % (kind, key)] = value
867

    
868
  return env
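# Illustrative result (made-up values, only a subset of keys shown): for an
# instance with one bridged NIC and one 10 GiB disk, the function above
# produces an environment along the lines of:
#
#   {
#     "OP_TARGET": "inst1.example.com",
#     "INSTANCE_NAME": "inst1.example.com",
#     "INSTANCE_PRIMARY": "node1.example.com",
#     "INSTANCE_SECONDARIES": "node2.example.com",
#     "INSTANCE_STATUS": "up",
#     "INSTANCE_NIC_COUNT": 1,
#     "INSTANCE_NIC0_MODE": "bridged",
#     "INSTANCE_NIC0_BRIDGE": "xen-br0",
#     "INSTANCE_DISK_COUNT": 1,
#     "INSTANCE_DISK0_SIZE": 10240,
#   }
#
# plus one INSTANCE_BE_* / INSTANCE_HV_* entry per backend and hypervisor
# parameter.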
869

    
870

    
871
def _NICListToTuple(lu, nics):
872
  """Build a list of nic information tuples.
873

874
  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
875
  value in LUInstanceQueryData.
876

877
  @type lu:  L{LogicalUnit}
878
  @param lu: the logical unit on whose behalf we execute
879
  @type nics: list of L{objects.NIC}
880
  @param nics: list of nics to convert to hooks tuples
881

882
  """
883
  hooks_nics = []
884
  cluster = lu.cfg.GetClusterInfo()
885
  for nic in nics:
886
    ip = nic.ip
887
    mac = nic.mac
888
    filled_params = cluster.SimpleFillNIC(nic.nicparams)
889
    mode = filled_params[constants.NIC_MODE]
890
    link = filled_params[constants.NIC_LINK]
891
    hooks_nics.append((ip, mac, mode, link))
892
  return hooks_nics
893

    
894

    
895
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
896
  """Builds instance related env variables for hooks from an object.
897

898
  @type lu: L{LogicalUnit}
899
  @param lu: the logical unit on whose behalf we execute
900
  @type instance: L{objects.Instance}
901
  @param instance: the instance for which we should build the
902
      environment
903
  @type override: dict
904
  @param override: dictionary with key/values that will override
905
      our values
906
  @rtype: dict
907
  @return: the hook environment dictionary
908

909
  """
910
  cluster = lu.cfg.GetClusterInfo()
911
  bep = cluster.FillBE(instance)
912
  hvp = cluster.FillHV(instance)
913
  args = {
914
    'name': instance.name,
915
    'primary_node': instance.primary_node,
916
    'secondary_nodes': instance.secondary_nodes,
917
    'os_type': instance.os,
918
    'status': instance.admin_up,
919
    'memory': bep[constants.BE_MEMORY],
920
    'vcpus': bep[constants.BE_VCPUS],
921
    'nics': _NICListToTuple(lu, instance.nics),
922
    'disk_template': instance.disk_template,
923
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
924
    'bep': bep,
925
    'hvp': hvp,
926
    'hypervisor_name': instance.hypervisor,
927
  }
928
  if override:
929
    args.update(override)
930
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142
931

    
932

    
933
def _AdjustCandidatePool(lu, exceptions):
934
  """Adjust the candidate pool after node operations.
935

936
  """
937
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
938
  if mod_list:
939
    lu.LogInfo("Promoted nodes to master candidate role: %s",
940
               utils.CommaJoin(node.name for node in mod_list))
941
    for name in mod_list:
942
      lu.context.ReaddNode(name)
943
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
944
  if mc_now > mc_max:
945
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
946
               (mc_now, mc_max))
947

    
948

    
949
def _DecideSelfPromotion(lu, exceptions=None):
950
  """Decide whether I should promote myself as a master candidate.
951

952
  """
953
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
954
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
955
  # the new node will increase mc_max by one, so:
956
  mc_should = min(mc_should + 1, cp_size)
957
  return mc_now < mc_should
958

    
959

    
960
def _CheckNicsBridgesExist(lu, target_nics, target_node):
961
  """Check that the brigdes needed by a list of nics exist.
962

963
  """
964
  cluster = lu.cfg.GetClusterInfo()
965
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
966
  brlist = [params[constants.NIC_LINK] for params in paramslist
967
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
968
  if brlist:
969
    result = lu.rpc.call_bridges_exist(target_node, brlist)
970
    result.Raise("Error checking bridges on destination node '%s'" %
971
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
972

    
973

    
974
def _CheckInstanceBridgesExist(lu, instance, node=None):
975
  """Check that the brigdes needed by an instance exist.
976

977
  """
978
  if node is None:
979
    node = instance.primary_node
980
  _CheckNicsBridgesExist(lu, instance.nics, node)
981

    
982

    
983
def _CheckOSVariant(os_obj, name):
984
  """Check whether an OS name conforms to the os variants specification.
985

986
  @type os_obj: L{objects.OS}
987
  @param os_obj: OS object to check
988
  @type name: string
989
  @param name: OS name passed by the user, to check for validity
990

991
  """
992
  if not os_obj.supported_variants:
993
    return
994
  variant = objects.OS.GetVariant(name)
995
  if not variant:
996
    raise errors.OpPrereqError("OS name must include a variant",
997
                               errors.ECODE_INVAL)
998

    
999
  if variant not in os_obj.supported_variants:
1000
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
1001

    
1002

    
1003
def _GetNodeInstancesInner(cfg, fn):
1004
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
1005

    
1006

    
1007
def _GetNodeInstances(cfg, node_name):
1008
  """Returns a list of all primary and secondary instances on a node.
1009

1010
  """
1011

    
1012
  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
1013

    
1014

    
1015
def _GetNodePrimaryInstances(cfg, node_name):
1016
  """Returns primary instances on a node.
1017

1018
  """
1019
  return _GetNodeInstancesInner(cfg,
1020
                                lambda inst: node_name == inst.primary_node)
1021

    
1022

    
1023
def _GetNodeSecondaryInstances(cfg, node_name):
1024
  """Returns secondary instances on a node.
1025

1026
  """
1027
  return _GetNodeInstancesInner(cfg,
1028
                                lambda inst: node_name in inst.secondary_nodes)
1029

    
1030

    
1031
def _GetStorageTypeArgs(cfg, storage_type):
1032
  """Returns the arguments for a storage type.
1033

1034
  """
1035
  # Special case for file storage
1036
  if storage_type == constants.ST_FILE:
1037
    # storage.FileStorage wants a list of storage directories
1038
    return [[cfg.GetFileStorageDir()]]
1039

    
1040
  return []
1041

    
1042

    
1043
def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
1044
  faulty = []
1045

    
1046
  for dev in instance.disks:
1047
    cfg.SetDiskID(dev, node_name)
1048

    
1049
  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
1050
  result.Raise("Failed to get disk status from node %s" % node_name,
1051
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
1052

    
1053
  for idx, bdev_status in enumerate(result.payload):
1054
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
1055
      faulty.append(idx)
1056

    
1057
  return faulty
1058

    
1059

    
1060
def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
1061
  """Check the sanity of iallocator and node arguments and use the
1062
  cluster-wide iallocator if appropriate.
1063

1064
  Check that at most one of (iallocator, node) is specified. If none is
1065
  specified, then the LU's opcode's iallocator slot is filled with the
1066
  cluster-wide default iallocator.
1067

1068
  @type iallocator_slot: string
1069
  @param iallocator_slot: the name of the opcode iallocator slot
1070
  @type node_slot: string
1071
  @param node_slot: the name of the opcode target node slot
1072

1073
  """
1074
  node = getattr(lu.op, node_slot, None)
1075
  iallocator = getattr(lu.op, iallocator_slot, None)
1076

    
1077
  if node is not None and iallocator is not None:
1078
    raise errors.OpPrereqError("Do not specify both, iallocator and node.",
1079
                               errors.ECODE_INVAL)
1080
  elif node is None and iallocator is None:
1081
    default_iallocator = lu.cfg.GetDefaultIAllocator()
1082
    if default_iallocator:
1083
      setattr(lu.op, iallocator_slot, default_iallocator)
1084
    else:
1085
      raise errors.OpPrereqError("No iallocator or node given and no"
1086
                                 " cluster-wide default iallocator found."
1087
                                 " Please specify either an iallocator or a"
1088
                                 " node, or set a cluster-wide default"
1089
                                 " iallocator.")
1090

    
1091

    
1092
class LUClusterPostInit(LogicalUnit):
1093
  """Logical unit for running hooks after cluster initialization.
1094

1095
  """
1096
  HPATH = "cluster-init"
1097
  HTYPE = constants.HTYPE_CLUSTER
1098

    
1099
  def BuildHooksEnv(self):
1100
    """Build hooks env.
1101

1102
    """
1103
    env = {"OP_TARGET": self.cfg.GetClusterName()}
1104
    mn = self.cfg.GetMasterNode()
1105
    return env, [], [mn]
1106

    
1107
  def Exec(self, feedback_fn):
1108
    """Nothing to do.
1109

1110
    """
1111
    return True
1112

    
1113

    
1114
class LUClusterDestroy(LogicalUnit):
1115
  """Logical unit for destroying the cluster.
1116

1117
  """
1118
  HPATH = "cluster-destroy"
1119
  HTYPE = constants.HTYPE_CLUSTER
1120

    
1121
  def BuildHooksEnv(self):
1122
    """Build hooks env.
1123

1124
    """
1125
    env = {"OP_TARGET": self.cfg.GetClusterName()}
1126
    return env, [], []
1127

    
1128
  def CheckPrereq(self):
1129
    """Check prerequisites.
1130

1131
    This checks whether the cluster is empty.
1132

1133
    Any errors are signaled by raising errors.OpPrereqError.
1134

1135
    """
1136
    master = self.cfg.GetMasterNode()
1137

    
1138
    nodelist = self.cfg.GetNodeList()
1139
    if len(nodelist) != 1 or nodelist[0] != master:
1140
      raise errors.OpPrereqError("There are still %d node(s) in"
1141
                                 " this cluster." % (len(nodelist) - 1),
1142
                                 errors.ECODE_INVAL)
1143
    instancelist = self.cfg.GetInstanceList()
1144
    if instancelist:
1145
      raise errors.OpPrereqError("There are still %d instance(s) in"
1146
                                 " this cluster." % len(instancelist),
1147
                                 errors.ECODE_INVAL)
1148

    
1149
  def Exec(self, feedback_fn):
1150
    """Destroys the cluster.
1151

1152
    """
1153
    master = self.cfg.GetMasterNode()
1154

    
1155
    # Run post hooks on master node before it's removed
1156
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
1157
    try:
1158
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
1159
    except:
1160
      # pylint: disable-msg=W0702
1161
      self.LogWarning("Errors occurred running hooks on %s" % master)
1162

    
1163
    result = self.rpc.call_node_stop_master(master, False)
1164
    result.Raise("Could not disable the master role")
1165

    
1166
    return master
1167

    
1168

    
1169
def _VerifyCertificate(filename):
1170
  """Verifies a certificate for LUClusterVerify.
1171

1172
  @type filename: string
1173
  @param filename: Path to PEM file
1174

1175
  """
1176
  try:
1177
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1178
                                           utils.ReadFile(filename))
1179
  except Exception, err: # pylint: disable-msg=W0703
1180
    return (LUClusterVerify.ETYPE_ERROR,
1181
            "Failed to load X509 certificate %s: %s" % (filename, err))
1182

    
1183
  (errcode, msg) = \
1184
    utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
1185
                                constants.SSL_CERT_EXPIRATION_ERROR)
1186

    
1187
  if msg:
1188
    fnamemsg = "While verifying %s: %s" % (filename, msg)
1189
  else:
1190
    fnamemsg = None
1191

    
1192
  if errcode is None:
1193
    return (None, fnamemsg)
1194
  elif errcode == utils.CERT_WARNING:
1195
    return (LUClusterVerify.ETYPE_WARNING, fnamemsg)
1196
  elif errcode == utils.CERT_ERROR:
1197
    return (LUClusterVerify.ETYPE_ERROR, fnamemsg)
1198

    
1199
  raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
1200

    
1201

    
1202
class LUClusterVerify(LogicalUnit):
1203
  """Verifies the cluster status.
1204

1205
  """
1206
  HPATH = "cluster-verify"
1207
  HTYPE = constants.HTYPE_CLUSTER
1208
  REQ_BGL = False
1209

    
1210
  TCLUSTER = "cluster"
1211
  TNODE = "node"
1212
  TINSTANCE = "instance"
1213

    
1214
  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
1215
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
1216
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
1217
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
1218
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
1219
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
1220
  EINSTANCEFAULTYDISK = (TINSTANCE, "EINSTANCEFAULTYDISK")
1221
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
1222
  EINSTANCESPLITGROUPS = (TINSTANCE, "EINSTANCESPLITGROUPS")
1223
  ENODEDRBD = (TNODE, "ENODEDRBD")
1224
  ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
1225
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
1226
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
1227
  ENODEHV = (TNODE, "ENODEHV")
1228
  ENODELVM = (TNODE, "ENODELVM")
1229
  ENODEN1 = (TNODE, "ENODEN1")
1230
  ENODENET = (TNODE, "ENODENET")
1231
  ENODEOS = (TNODE, "ENODEOS")
1232
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
1233
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
1234
  ENODERPC = (TNODE, "ENODERPC")
1235
  ENODESSH = (TNODE, "ENODESSH")
1236
  ENODEVERSION = (TNODE, "ENODEVERSION")
1237
  ENODESETUP = (TNODE, "ENODESETUP")
1238
  ENODETIME = (TNODE, "ENODETIME")
1239
  ENODEOOBPATH = (TNODE, "ENODEOOBPATH")
1240

    
1241
  ETYPE_FIELD = "code"
1242
  ETYPE_ERROR = "ERROR"
1243
  ETYPE_WARNING = "WARNING"
1244

    
1245
  _HOOKS_INDENT_RE = re.compile("^", re.M)
1246

    
1247
  class NodeImage(object):
1248
    """A class representing the logical and physical status of a node.
1249

1250
    @type name: string
1251
    @ivar name: the node name to which this object refers
1252
    @ivar volumes: a structure as returned from
1253
        L{ganeti.backend.GetVolumeList} (runtime)
1254
    @ivar instances: a list of running instances (runtime)
1255
    @ivar pinst: list of configured primary instances (config)
1256
    @ivar sinst: list of configured secondary instances (config)
1257
    @ivar sbp: dictionary of {secondary-node: list of instances} of all peers
1258
        of this node (config)
1259
    @ivar mfree: free memory, as reported by hypervisor (runtime)
1260
    @ivar dfree: free disk, as reported by the node (runtime)
1261
    @ivar offline: the offline status (config)
1262
    @type rpc_fail: boolean
1263
    @ivar rpc_fail: whether the RPC verify call failed (overall,
1264
        not whether the individual keys were correct) (runtime)
1265
    @type lvm_fail: boolean
1266
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
1267
    @type hyp_fail: boolean
1268
    @ivar hyp_fail: whether the RPC call didn't return the instance list
1269
    @type ghost: boolean
1270
    @ivar ghost: whether this is a known node or not (config)
1271
    @type os_fail: boolean
1272
    @ivar os_fail: whether the RPC call didn't return valid OS data
1273
    @type oslist: list
1274
    @ivar oslist: list of OSes as diagnosed by DiagnoseOS
1275
    @type vm_capable: boolean
1276
    @ivar vm_capable: whether the node can host instances
1277

1278
    """
1279
    def __init__(self, offline=False, name=None, vm_capable=True):
1280
      self.name = name
1281
      self.volumes = {}
1282
      self.instances = []
1283
      self.pinst = []
1284
      self.sinst = []
1285
      self.sbp = {}
1286
      self.mfree = 0
1287
      self.dfree = 0
1288
      self.offline = offline
1289
      self.vm_capable = vm_capable
1290
      self.rpc_fail = False
1291
      self.lvm_fail = False
1292
      self.hyp_fail = False
1293
      self.ghost = False
1294
      self.os_fail = False
1295
      self.oslist = {}
1296

    
1297
  def ExpandNames(self):
1298
    self.needed_locks = {
1299
      locking.LEVEL_NODE: locking.ALL_SET,
1300
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1301
    }
1302
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1303

    
1304
  def _Error(self, ecode, item, msg, *args, **kwargs):
1305
    """Format an error message.
1306

1307
    Based on the opcode's error_codes parameter, either format a
1308
    parseable error code, or a simpler error string.
1309

1310
    This must be called only from Exec and functions called from Exec.
1311

1312
    """
1313
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1314
    itype, etxt = ecode
1315
    # first complete the msg
1316
    if args:
1317
      msg = msg % args
1318
    # then format the whole message
1319
    if self.op.error_codes:
1320
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1321
    else:
1322
      if item:
1323
        item = " " + item
1324
      else:
1325
        item = ""
1326
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1327
    # and finally report it via the feedback_fn
1328
    self._feedback_fn("  - %s" % msg)
1329

    
1330
  def _ErrorIf(self, cond, *args, **kwargs):
1331
    """Log an error message if the passed condition is True.
1332

1333
    """
1334
    cond = bool(cond) or self.op.debug_simulate_errors
1335
    if cond:
1336
      self._Error(*args, **kwargs)
1337
    # do not mark the operation as failed for WARN cases only
1338
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1339
      self.bad = self.bad or cond
1340

    
1341
  def _VerifyNode(self, ninfo, nresult):
1342
    """Perform some basic validation on data returned from a node.
1343

1344
      - check the result data structure is well formed and has all the
1345
        mandatory fields
1346
      - check ganeti version
1347

1348
    @type ninfo: L{objects.Node}
1349
    @param ninfo: the node to check
1350
    @param nresult: the results from the node
1351
    @rtype: boolean
1352
    @return: whether overall this call was successful (and we can expect
1353
         reasonable values in the response)
1354

1355
    """
1356
    node = ninfo.name
1357
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1358

    
1359
    # main result, nresult should be a non-empty dict
1360
    test = not nresult or not isinstance(nresult, dict)
1361
    _ErrorIf(test, self.ENODERPC, node,
1362
                  "unable to verify node: no data returned")
1363
    if test:
1364
      return False
1365

    
1366
    # compares ganeti version
1367
    local_version = constants.PROTOCOL_VERSION
1368
    remote_version = nresult.get("version", None)
1369
    test = not (remote_version and
1370
                isinstance(remote_version, (list, tuple)) and
1371
                len(remote_version) == 2)
1372
    _ErrorIf(test, self.ENODERPC, node,
1373
             "connection to node returned invalid data")
1374
    if test:
1375
      return False
1376

    
1377
    test = local_version != remote_version[0]
1378
    _ErrorIf(test, self.ENODEVERSION, node,
1379
             "incompatible protocol versions: master %s,"
1380
             " node %s", local_version, remote_version[0])
1381
    if test:
1382
      return False
1383

    
1384
    # node seems compatible, we can actually try to look into its results
1385

    
1386
    # full package version
1387
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1388
                  self.ENODEVERSION, node,
1389
                  "software version mismatch: master %s, node %s",
1390
                  constants.RELEASE_VERSION, remote_version[1],
1391
                  code=self.ETYPE_WARNING)
1392

    
1393
    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1394
    if ninfo.vm_capable and isinstance(hyp_result, dict):
1395
      for hv_name, hv_result in hyp_result.iteritems():
1396
        test = hv_result is not None
1397
        _ErrorIf(test, self.ENODEHV, node,
1398
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1399

    
1400
    hvp_result = nresult.get(constants.NV_HVPARAMS, None)
1401
    if ninfo.vm_capable and isinstance(hvp_result, list):
1402
      for item, hv_name, hv_result in hvp_result:
1403
        _ErrorIf(True, self.ENODEHV, node,
1404
                 "hypervisor %s parameter verify failure (source %s): %s",
1405
                 hv_name, item, hv_result)
1406

    
1407
    test = nresult.get(constants.NV_NODESETUP,
1408
                           ["Missing NODESETUP results"])
1409
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1410
             "; ".join(test))
1411

    
1412
    return True
1413

    
1414
  def _VerifyNodeTime(self, ninfo, nresult,
1415
                      nvinfo_starttime, nvinfo_endtime):
1416
    """Check the node time.
1417

1418
    @type ninfo: L{objects.Node}
1419
    @param ninfo: the node to check
1420
    @param nresult: the remote results for the node
1421
    @param nvinfo_starttime: the start time of the RPC call
1422
    @param nvinfo_endtime: the end time of the RPC call
1423

1424
    """
1425
    node = ninfo.name
1426
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1427

    
1428
    ntime = nresult.get(constants.NV_TIME, None)
1429
    try:
1430
      ntime_merged = utils.MergeTime(ntime)
1431
    except (ValueError, TypeError):
1432
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
1433
      return
1434

    
1435
    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
1436
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
1437
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
1438
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
1439
    else:
1440
      ntime_diff = None
1441

    
1442
    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
1443
             "Node time diverges by at least %s from master node time",
1444
             ntime_diff)
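    # Worked example (made-up numbers, assuming constants.NODE_MAX_CLOCK_SKEW
    # is 150 seconds): with an RPC window of nvinfo_starttime = 1000.0 to
    # nvinfo_endtime = 1002.0 and a node reporting ntime_merged = 840.0, the
    # first branch above fires because 840.0 < 1000.0 - 150, and the reported
    # divergence is "%.01fs" % abs(1000.0 - 840.0), i.e. "160.0s".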
1445

    
1446
  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
1447
    """Check the node LVM results.
1448

1449
    @type ninfo: L{objects.Node}
1450
    @param ninfo: the node to check
1451
    @param nresult: the remote results for the node
1452
    @param vg_name: the configured VG name
1453

1454
    """
1455
    if vg_name is None:
1456
      return
1457

    
1458
    node = ninfo.name
1459
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1460

    
1461
    # checks vg existence and size > 20G
1462
    vglist = nresult.get(constants.NV_VGLIST, None)
1463
    test = not vglist
1464
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1465
    if not test:
1466
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1467
                                            constants.MIN_VG_SIZE)
1468
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1469

    
1470
    # check pv names
1471
    pvlist = nresult.get(constants.NV_PVLIST, None)
1472
    test = pvlist is None
1473
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
1474
    if not test:
1475
      # check that ':' is not present in PV names, since it's a
1476
      # special character for lvcreate (denotes the range of PEs to
1477
      # use on the PV)
1478
      for _, pvname, owner_vg in pvlist:
1479
        test = ":" in pvname
1480
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
1481
                 " '%s' of VG '%s'", pvname, owner_vg)
1482

    
1483
  def _VerifyNodeBridges(self, ninfo, nresult, bridges):
1484
    """Check the node bridges.
1485

1486
    @type ninfo: L{objects.Node}
1487
    @param ninfo: the node to check
1488
    @param nresult: the remote results for the node
1489
    @param bridges: the expected list of bridges
1490

1491
    """
1492
    if not bridges:
1493
      return
1494

    
1495
    node = ninfo.name
1496
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1497

    
1498
    missing = nresult.get(constants.NV_BRIDGES, None)
1499
    test = not isinstance(missing, list)
1500
    _ErrorIf(test, self.ENODENET, node,
1501
             "did not return valid bridge information")
1502
    if not test:
1503
      _ErrorIf(bool(missing), self.ENODENET, node, "missing bridges: %s" %
1504
               utils.CommaJoin(sorted(missing)))
1505

    
1506
  def _VerifyNodeNetwork(self, ninfo, nresult):
1507
    """Check the node network connectivity results.
1508

1509
    @type ninfo: L{objects.Node}
1510
    @param ninfo: the node to check
1511
    @param nresult: the remote results for the node
1512

1513
    """
1514
    node = ninfo.name
1515
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1516

    
1517
    test = constants.NV_NODELIST not in nresult
1518
    _ErrorIf(test, self.ENODESSH, node,
1519
             "node hasn't returned node ssh connectivity data")
1520
    if not test:
1521
      if nresult[constants.NV_NODELIST]:
1522
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
1523
          _ErrorIf(True, self.ENODESSH, node,
1524
                   "ssh communication with node '%s': %s", a_node, a_msg)
1525

    
1526
    test = constants.NV_NODENETTEST not in nresult
1527
    _ErrorIf(test, self.ENODENET, node,
1528
             "node hasn't returned node tcp connectivity data")
1529
    if not test:
1530
      if nresult[constants.NV_NODENETTEST]:
1531
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
1532
        for anode in nlist:
1533
          _ErrorIf(True, self.ENODENET, node,
1534
                   "tcp communication with node '%s': %s",
1535
                   anode, nresult[constants.NV_NODENETTEST][anode])
1536

    
1537
    test = constants.NV_MASTERIP not in nresult
1538
    _ErrorIf(test, self.ENODENET, node,
1539
             "node hasn't returned node master IP reachability data")
1540
    if not test:
1541
      if not nresult[constants.NV_MASTERIP]:
1542
        if node == self.master_node:
1543
          msg = "the master node cannot reach the master IP (not configured?)"
1544
        else:
1545
          msg = "cannot reach the master IP"
1546
        _ErrorIf(True, self.ENODENET, node, msg)
1547

    
1548
  def _VerifyInstance(self, instance, instanceconfig, node_image,
1549
                      diskstatus):
1550
    """Verify an instance.
1551

1552
    This function checks to see if the required block devices are
1553
    available on the instance's node.
1554

1555
    """
1556
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1557
    node_current = instanceconfig.primary_node
1558

    
1559
    node_vol_should = {}
1560
    instanceconfig.MapLVsByNode(node_vol_should)
1561

    
1562
    for node in node_vol_should:
1563
      n_img = node_image[node]
1564
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1565
        # ignore missing volumes on offline or broken nodes
1566
        continue
1567
      for volume in node_vol_should[node]:
1568
        test = volume not in n_img.volumes
1569
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
1570
                 "volume %s missing on node %s", volume, node)
1571

    
1572
    if instanceconfig.admin_up:
1573
      pri_img = node_image[node_current]
1574
      test = instance not in pri_img.instances and not pri_img.offline
1575
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
1576
               "instance not running on its primary node %s",
1577
               node_current)
1578

    
1579
    for node, n_img in node_image.items():
1580
      if node != node_current:
1581
        test = instance in n_img.instances
1582
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
1583
                 "instance should not run on node %s", node)
1584

    
1585
    diskdata = [(nname, success, status, idx)
1586
                for (nname, disks) in diskstatus.items()
1587
                for idx, (success, status) in enumerate(disks)]
1588

    
1589
    for nname, success, bdev_status, idx in diskdata:
1590
      # the 'ghost node' construction in Exec() ensures that we have a
1591
      # node here
1592
      snode = node_image[nname]
1593
      bad_snode = snode.ghost or snode.offline
1594
      _ErrorIf(instanceconfig.admin_up and not success and not bad_snode,
1595
               self.EINSTANCEFAULTYDISK, instance,
1596
               "couldn't retrieve status for disk/%s on %s: %s",
1597
               idx, nname, bdev_status)
1598
      _ErrorIf((instanceconfig.admin_up and success and
1599
                bdev_status.ldisk_status == constants.LDS_FAULTY),
1600
               self.EINSTANCEFAULTYDISK, instance,
1601
               "disk/%s on %s is faulty", idx, nname)
1602

    
1603
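  # Illustrative sketch (not part of the LU), with hypothetical input: the
  # diskdata comprehension above flattens the per-node disk status map into
  # (node, success, status, index) tuples.
  #
  #   >>> diskstatus = {"node1": [(True, "ok"), (False, "degraded")]}
  #   >>> [(n, ok, st, idx)
  #   ...  for (n, disks) in diskstatus.items()
  #   ...  for idx, (ok, st) in enumerate(disks)]
  #   [('node1', True, 'ok', 0), ('node1', False, 'degraded', 1)]
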
  def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
1604
    """Verify if there are any unknown volumes in the cluster.
1605

1606
    The .os, .swap and backup volumes are ignored. All other volumes are
1607
    reported as unknown.
1608

1609
    @type reserved: L{ganeti.utils.FieldSet}
1610
    @param reserved: a FieldSet of reserved volume names
1611

1612
    """
1613
    for node, n_img in node_image.items():
1614
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1615
        # skip non-healthy nodes
1616
        continue
1617
      for volume in n_img.volumes:
1618
        test = ((node not in node_vol_should or
1619
                volume not in node_vol_should[node]) and
1620
                not reserved.Matches(volume))
1621
        self._ErrorIf(test, self.ENODEORPHANLV, node,
1622
                      "volume %s is unknown", volume)
1623

    
1624
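  # Illustrative sketch (not part of the LU), with hypothetical data: a volume
  # is an orphan when it is neither expected on its node nor matched by a
  # reserved-name pattern (the reserved set is omitted here for brevity).
  #
  #   >>> node_vol_should = {"node1": ["xenvg/lv1"]}
  #   >>> volumes = ["xenvg/lv1", "xenvg/stray"]
  #   >>> [v for v in volumes if v not in node_vol_should.get("node1", [])]
  #   ['xenvg/stray']
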
  def _VerifyOrphanInstances(self, instancelist, node_image):
1625
    """Verify the list of running instances.
1626

1627
    This checks what instances are running but unknown to the cluster.
1628

1629
    """
1630
    for node, n_img in node_image.items():
1631
      for o_inst in n_img.instances:
1632
        test = o_inst not in instancelist
1633
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
1634
                      "instance %s on node %s should not exist", o_inst, node)
1635

    
1636
  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
1637
    """Verify N+1 Memory Resilience.
1638

1639
    Check that if one single node dies we can still start all the
1640
    instances it was primary for.
1641

1642
    """
1643
    for node, n_img in node_image.items():
1644
      # This code checks that every node which is now listed as
1645
      # secondary has enough memory to host all instances it is
1646
      # supposed to, should a single other node in the cluster fail.
1647
      # FIXME: not ready for failover to an arbitrary node
1648
      # FIXME: does not support file-backed instances
1649
      # WARNING: we currently take into account down instances as well
1650
      # as up ones, considering that even if they're down someone
1651
      # might want to start them even in the event of a node failure.
1652
      if n_img.offline:
1653
        # we're skipping offline nodes from the N+1 warning, since
1654
        # most likely we don't have good memory information from them;
1655
        # we already list instances living on such nodes, and that's
1656
        # enough warning
1657
        continue
1658
      for prinode, instances in n_img.sbp.items():
1659
        needed_mem = 0
1660
        for instance in instances:
1661
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
1662
          if bep[constants.BE_AUTO_BALANCE]:
1663
            needed_mem += bep[constants.BE_MEMORY]
1664
        test = n_img.mfree < needed_mem
1665
        self._ErrorIf(test, self.ENODEN1, node,
1666
                      "not enough memory to accomodate instance failovers"
1667
                      " should node %s fail (%dMiB needed, %dMiB available)",
1668
                      prinode, needed_mem, n_img.mfree)
1669

    
1670
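  # Illustrative sketch (not part of the LU), with hypothetical numbers: for
  # each node the N+1 check sums the memory of the auto-balanced instances
  # grouped by primary node and compares each total with the node's free
  # memory (here the per-primary instance memory sizes are given directly).
  #
  #   >>> sbp = {"node1": [1024, 2048], "node2": [512]}
  #   >>> mfree = 2560
  #   >>> [(prinode, sum(mems)) for prinode, mems in sorted(sbp.items())
  #   ...  if sum(mems) > mfree]
  #   [('node1', 3072)]
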
  def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
1671
                       master_files):
1672
    """Verifies and computes the node required file checksums.
1673

1674
    @type ninfo: L{objects.Node}
1675
    @param ninfo: the node to check
1676
    @param nresult: the remote results for the node
1677
    @param file_list: required list of files
1678
    @param local_cksum: dictionary of local files and their checksums
1679
    @param master_files: list of files that only masters should have
1680

1681
    """
1682
    node = ninfo.name
1683
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1684

    
1685
    remote_cksum = nresult.get(constants.NV_FILELIST, None)
1686
    test = not isinstance(remote_cksum, dict)
1687
    _ErrorIf(test, self.ENODEFILECHECK, node,
1688
             "node hasn't returned file checksum data")
1689
    if test:
1690
      return
1691

    
1692
    for file_name in file_list:
1693
      node_is_mc = ninfo.master_candidate
1694
      must_have = (file_name not in master_files) or node_is_mc
1695
      # missing
1696
      test1 = file_name not in remote_cksum
1697
      # invalid checksum
1698
      test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
1699
      # existing and good
1700
      test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
1701
      _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
1702
               "file '%s' missing", file_name)
1703
      _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
1704
               "file '%s' has wrong checksum", file_name)
1705
      # not candidate and this is not a must-have file
1706
      _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
1707
               "file '%s' should not exist on non master"
1708
               " candidates (and the file is outdated)", file_name)
1709
      # all good, except non-master/non-must have combination
1710
      _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
1711
               "file '%s' should not exist"
1712
               " on non master candidates", file_name)
1713

    
1714
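  # Illustrative sketch (not part of the LU), with hypothetical checksums: the
  # file check above boils down to comparing two name-to-checksum maps.
  #
  #   >>> local = {"/etc/ganeti/config.data": "abc", "/etc/hosts": "def"}
  #   >>> remote = {"/etc/hosts": "xyz"}
  #   >>> sorted(f for f in local if f not in remote)        # missing files
  #   ['/etc/ganeti/config.data']
  #   >>> sorted(f for f in local
  #   ...        if f in remote and remote[f] != local[f])   # wrong checksum
  #   ['/etc/hosts']
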
  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
1715
                      drbd_map):
1716
    """Verifies and the node DRBD status.
1717

1718
    @type ninfo: L{objects.Node}
1719
    @param ninfo: the node to check
1720
    @param nresult: the remote results for the node
1721
    @param instanceinfo: the dict of instances
1722
    @param drbd_helper: the configured DRBD usermode helper
1723
    @param drbd_map: the DRBD map as returned by
1724
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}
1725

1726
    """
1727
    node = ninfo.name
1728
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1729

    
1730
    if drbd_helper:
1731
      helper_result = nresult.get(constants.NV_DRBDHELPER, None)
1732
      test = (helper_result is None)
1733
      _ErrorIf(test, self.ENODEDRBDHELPER, node,
1734
               "no drbd usermode helper returned")
1735
      if helper_result:
1736
        status, payload = helper_result
1737
        test = not status
1738
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
1739
                 "drbd usermode helper check unsuccessful: %s", payload)
1740
        test = status and (payload != drbd_helper)
1741
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
1742
                 "wrong drbd usermode helper: %s", payload)
1743

    
1744
    # compute the DRBD minors
1745
    node_drbd = {}
1746
    for minor, instance in drbd_map[node].items():
1747
      test = instance not in instanceinfo
1748
      _ErrorIf(test, self.ECLUSTERCFG, None,
1749
               "ghost instance '%s' in temporary DRBD map", instance)
1750
      # ghost instance should not be running, but otherwise we
      # don't give double warnings (both ghost instance and
      # unallocated minor in use)
1753
      if test:
1754
        node_drbd[minor] = (instance, False)
1755
      else:
1756
        instance = instanceinfo[instance]
1757
        node_drbd[minor] = (instance.name, instance.admin_up)
1758

    
1759
    # and now check them
1760
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
1761
    test = not isinstance(used_minors, (tuple, list))
1762
    _ErrorIf(test, self.ENODEDRBD, node,
1763
             "cannot parse drbd status file: %s", str(used_minors))
1764
    if test:
1765
      # we cannot check drbd status
1766
      return
1767

    
1768
    for minor, (iname, must_exist) in node_drbd.items():
1769
      test = minor not in used_minors and must_exist
1770
      _ErrorIf(test, self.ENODEDRBD, node,
1771
               "drbd minor %d of instance %s is not active", minor, iname)
1772
    for minor in used_minors:
1773
      test = minor not in node_drbd
1774
      _ErrorIf(test, self.ENODEDRBD, node,
1775
               "unallocated drbd minor %d is in use", minor)
1776

    
1777
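  # Illustrative sketch (not part of the LU), with hypothetical minors: the
  # DRBD check compares the minors the configuration expects to be active
  # with the minors the node actually reports as in use.
  #
  #   >>> node_drbd = {0: ("inst1", True), 1: ("inst2", False)}
  #   >>> used_minors = [1, 2]
  #   >>> [m for m, (_, must) in node_drbd.items() if must and m not in used_minors]
  #   [0]
  #   >>> [m for m in used_minors if m not in node_drbd]
  #   [2]
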
  def _UpdateNodeOS(self, ninfo, nresult, nimg):
1778
    """Builds the node OS structures.
1779

1780
    @type ninfo: L{objects.Node}
1781
    @param ninfo: the node to check
1782
    @param nresult: the remote results for the node
1783
    @param nimg: the node image object
1784

1785
    """
1786
    node = ninfo.name
1787
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1788

    
1789
    remote_os = nresult.get(constants.NV_OSLIST, None)
1790
    test = (not isinstance(remote_os, list) or
1791
            not compat.all(isinstance(v, list) and len(v) == 7
1792
                           for v in remote_os))
1793

    
1794
    _ErrorIf(test, self.ENODEOS, node,
1795
             "node hasn't returned valid OS data")
1796

    
1797
    nimg.os_fail = test
1798

    
1799
    if test:
1800
      return
1801

    
1802
    os_dict = {}
1803

    
1804
    for (name, os_path, status, diagnose,
1805
         variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:
1806

    
1807
      if name not in os_dict:
1808
        os_dict[name] = []
1809

    
1810
      # parameters is a list of lists instead of list of tuples due to
1811
      # JSON lacking a real tuple type, fix it:
1812
      parameters = [tuple(v) for v in parameters]
1813
      os_dict[name].append((os_path, status, diagnose,
1814
                            set(variants), set(parameters), set(api_ver)))
1815

    
1816
    nimg.oslist = os_dict
1817

    
1818
  def _VerifyNodeOS(self, ninfo, nimg, base):
1819
    """Verifies the node OS list.
1820

1821
    @type ninfo: L{objects.Node}
1822
    @param ninfo: the node to check
1823
    @param nimg: the node image object
1824
    @param base: the 'template' node we match against (e.g. from the master)
1825

1826
    """
1827
    node = ninfo.name
1828
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1829

    
1830
    assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
1831

    
1832
    beautify_params = lambda l: ["%s: %s" % (k, v) for (k, v) in l]
1833
    for os_name, os_data in nimg.oslist.items():
1834
      assert os_data, "Empty OS status for OS %s?!" % os_name
1835
      f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
1836
      _ErrorIf(not f_status, self.ENODEOS, node,
1837
               "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
1838
      _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
1839
               "OS '%s' has multiple entries (first one shadows the rest): %s",
1840
               os_name, utils.CommaJoin([v[0] for v in os_data]))
1841
      # this will be caught in the backend too
1842
      _ErrorIf(compat.any(v >= constants.OS_API_V15 for v in f_api)
1843
               and not f_var, self.ENODEOS, node,
1844
               "OS %s with API at least %d does not declare any variant",
1845
               os_name, constants.OS_API_V15)
1846
      # comparisons with the 'base' image
1847
      test = os_name not in base.oslist
1848
      _ErrorIf(test, self.ENODEOS, node,
1849
               "Extra OS %s not present on reference node (%s)",
1850
               os_name, base.name)
1851
      if test:
1852
        continue
1853
      assert base.oslist[os_name], "Base node has empty OS status?"
1854
      _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
1855
      if not b_status:
1856
        # base OS is invalid, skipping
1857
        continue
1858
      for kind, a, b in [("API version", f_api, b_api),
1859
                         ("variants list", f_var, b_var),
1860
                         ("parameters", beautify_params(f_param),
1861
                          beautify_params(b_param))]:
1862
        _ErrorIf(a != b, self.ENODEOS, node,
1863
                 "OS %s for %s differs from reference node %s: [%s] vs. [%s]",
1864
                 kind, os_name, base.name,
1865
                 utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))
1866

    
1867
    # check any missing OSes
1868
    missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
1869
    _ErrorIf(missing, self.ENODEOS, node,
1870
             "OSes present on reference node %s but missing on this node: %s",
1871
             base.name, utils.CommaJoin(missing))
1872

    
1873
  def _VerifyOob(self, ninfo, nresult):
1874
    """Verifies out of band functionality of a node.
1875

1876
    @type ninfo: L{objects.Node}
1877
    @param ninfo: the node to check
1878
    @param nresult: the remote results for the node
1879

1880
    """
1881
    node = ninfo.name
1882
    # We just have to verify the paths on master and/or master candidates
1883
    # as the oob helper is invoked on the master
1884
    if ((ninfo.master_candidate or ninfo.master_capable) and
1885
        constants.NV_OOB_PATHS in nresult):
1886
      for path_result in nresult[constants.NV_OOB_PATHS]:
1887
        self._ErrorIf(path_result, self.ENODEOOBPATH, node, path_result)
1888

    
1889
  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
1890
    """Verifies and updates the node volume data.
1891

1892
    This function will update a L{NodeImage}'s internal structures
1893
    with data from the remote call.
1894

1895
    @type ninfo: L{objects.Node}
1896
    @param ninfo: the node to check
1897
    @param nresult: the remote results for the node
1898
    @param nimg: the node image object
1899
    @param vg_name: the configured VG name
1900

1901
    """
1902
    node = ninfo.name
1903
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1904

    
1905
    nimg.lvm_fail = True
1906
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1907
    if vg_name is None:
1908
      pass
1909
    elif isinstance(lvdata, basestring):
1910
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
1911
               utils.SafeEncode(lvdata))
1912
    elif not isinstance(lvdata, dict):
1913
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
1914
    else:
1915
      nimg.volumes = lvdata
1916
      nimg.lvm_fail = False
1917

    
1918
  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
1919
    """Verifies and updates the node instance list.
1920

1921
    If the listing was successful, then updates this node's instance
1922
    list. Otherwise, it marks the RPC call as failed for the instance
1923
    list key.
1924

1925
    @type ninfo: L{objects.Node}
1926
    @param ninfo: the node to check
1927
    @param nresult: the remote results for the node
1928
    @param nimg: the node image object
1929

1930
    """
1931
    idata = nresult.get(constants.NV_INSTANCELIST, None)
1932
    test = not isinstance(idata, list)
1933
    self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
1934
                  " (instancelist): %s", utils.SafeEncode(str(idata)))
1935
    if test:
1936
      nimg.hyp_fail = True
1937
    else:
1938
      nimg.instances = idata
1939

    
1940
  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
1941
    """Verifies and computes a node information map
1942

1943
    @type ninfo: L{objects.Node}
1944
    @param ninfo: the node to check
1945
    @param nresult: the remote results for the node
1946
    @param nimg: the node image object
1947
    @param vg_name: the configured VG name
1948

1949
    """
1950
    node = ninfo.name
1951
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1952

    
1953
    # try to read free memory (from the hypervisor)
1954
    hv_info = nresult.get(constants.NV_HVINFO, None)
1955
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
1956
    _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
1957
    if not test:
1958
      try:
1959
        nimg.mfree = int(hv_info["memory_free"])
1960
      except (ValueError, TypeError):
1961
        _ErrorIf(True, self.ENODERPC, node,
1962
                 "node returned invalid nodeinfo, check hypervisor")
1963

    
1964
    # FIXME: devise a free space model for file based instances as well
1965
    if vg_name is not None:
1966
      test = (constants.NV_VGLIST not in nresult or
1967
              vg_name not in nresult[constants.NV_VGLIST])
1968
      _ErrorIf(test, self.ENODELVM, node,
1969
               "node didn't return data for the volume group '%s'"
1970
               " - it is either missing or broken", vg_name)
1971
      if not test:
1972
        try:
1973
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
1974
        except (ValueError, TypeError):
1975
          _ErrorIf(True, self.ENODERPC, node,
1976
                   "node returned invalid LVM info, check LVM status")
1977

    
1978
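  # Illustrative sketch (not part of the LU), with hypothetical RPC data: the
  # nodeinfo parsing above simply coerces reported values to integers and
  # treats a coercion failure as bogus data from the node.
  #
  #   >>> hv_info = {"memory_free": "2048"}
  #   >>> try:
  #   ...   mfree = int(hv_info["memory_free"])
  #   ... except (ValueError, TypeError):
  #   ...   mfree = None
  #   >>> mfree
  #   2048
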
  def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
1979
    """Gets per-disk status information for all instances.
1980

1981
    @type nodelist: list of strings
1982
    @param nodelist: Node names
1983
    @type node_image: dict of (name, L{objects.Node})
1984
    @param node_image: Node objects
1985
    @type instanceinfo: dict of (name, L{objects.Instance})
1986
    @param instanceinfo: Instance objects
1987
    @rtype: {instance: {node: [(success, payload)]}}
1988
    @return: a dictionary of per-instance dictionaries with nodes as
1989
        keys and disk information as values; the disk information is a
1990
        list of tuples (success, payload)
1991

1992
    """
1993
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1994

    
1995
    node_disks = {}
1996
    node_disks_devonly = {}
1997
    diskless_instances = set()
1998
    diskless = constants.DT_DISKLESS
1999

    
2000
    for nname in nodelist:
2001
      node_instances = list(itertools.chain(node_image[nname].pinst,
2002
                                            node_image[nname].sinst))
2003
      diskless_instances.update(inst for inst in node_instances
2004
                                if instanceinfo[inst].disk_template == diskless)
2005
      disks = [(inst, disk)
2006
               for inst in node_instances
2007
               for disk in instanceinfo[inst].disks]
2008

    
2009
      if not disks:
2010
        # No need to collect data
2011
        continue
2012

    
2013
      node_disks[nname] = disks
2014

    
2015
      # Creating copies as SetDiskID below will modify the objects and that can
2016
      # lead to incorrect data returned from nodes
2017
      devonly = [dev.Copy() for (_, dev) in disks]
2018

    
2019
      for dev in devonly:
2020
        self.cfg.SetDiskID(dev, nname)
2021

    
2022
      node_disks_devonly[nname] = devonly
2023

    
2024
    assert len(node_disks) == len(node_disks_devonly)
2025

    
2026
    # Collect data from all nodes with disks
2027
    result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
2028
                                                          node_disks_devonly)
2029

    
2030
    assert len(result) == len(node_disks)
2031

    
2032
    instdisk = {}
2033

    
2034
    for (nname, nres) in result.items():
2035
      disks = node_disks[nname]
2036

    
2037
      if nres.offline:
2038
        # No data from this node
2039
        data = len(disks) * [(False, "node offline")]
2040
      else:
2041
        msg = nres.fail_msg
2042
        _ErrorIf(msg, self.ENODERPC, nname,
2043
                 "while getting disk information: %s", msg)
2044
        if msg:
2045
          # No data from this node
2046
          data = len(disks) * [(False, msg)]
2047
        else:
2048
          data = []
2049
          for idx, i in enumerate(nres.payload):
2050
            if isinstance(i, (tuple, list)) and len(i) == 2:
2051
              data.append(i)
2052
            else:
2053
              logging.warning("Invalid result from node %s, entry %d: %s",
2054
                              nname, idx, i)
2055
              data.append((False, "Invalid result from the remote node"))
2056

    
2057
      for ((inst, _), status) in zip(disks, data):
2058
        instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)
2059

    
2060
    # Add empty entries for diskless instances.
2061
    for inst in diskless_instances:
2062
      assert inst not in instdisk
2063
      instdisk[inst] = {}
2064

    
2065
    assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
2066
                      len(nnames) <= len(instanceinfo[inst].all_nodes) and
2067
                      compat.all(isinstance(s, (tuple, list)) and
2068
                                 len(s) == 2 for s in statuses)
2069
                      for inst, nnames in instdisk.items()
2070
                      for nname, statuses in nnames.items())
2071
    assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
2072

    
2073
    return instdisk
2074

    
2075
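  # Illustrative sketch (not part of the LU), with hypothetical statuses: the
  # per-instance result map is built with nested setdefault calls, so each
  # (instance, node) pair accumulates its disk statuses in order.
  #
  #   >>> disks = [("inst1", "disk0"), ("inst1", "disk1")]
  #   >>> data = [(True, "ok"), (False, "down")]
  #   >>> instdisk = {}
  #   >>> for ((inst, _), status) in zip(disks, data):
  #   ...   instdisk.setdefault(inst, {}).setdefault("node1", []).append(status)
  #   >>> instdisk
  #   {'inst1': {'node1': [(True, 'ok'), (False, 'down')]}}
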
  def _VerifyHVP(self, hvp_data):
2076
    """Verifies locally the syntax of the hypervisor parameters.
2077

2078
    """
2079
    for item, hv_name, hv_params in hvp_data:
2080
      msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
2081
             (item, hv_name))
2082
      try:
2083
        hv_class = hypervisor.GetHypervisor(hv_name)
2084
        utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2085
        hv_class.CheckParameterSyntax(hv_params)
2086
      except errors.GenericError, err:
2087
        self._ErrorIf(True, self.ECLUSTERCFG, None, msg % str(err))
2088

    
2089

    
2090
  def BuildHooksEnv(self):
2091
    """Build hooks env.
2092

2093
    Cluster-Verify hooks are run only in the post phase; their failure makes
    the output be logged in the verify output and the verification fail.
2095

2096
    """
2097
    all_nodes = self.cfg.GetNodeList()
2098
    env = {
2099
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
2100
      }
2101
    for node in self.cfg.GetAllNodesInfo().values():
2102
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
2103

    
2104
    return env, [], all_nodes
2105

    
2106
  def Exec(self, feedback_fn):
2107
    """Verify integrity of cluster, performing various test on nodes.
2108

2109
    """
2110
    # This method has too many local variables. pylint: disable-msg=R0914
2111
    self.bad = False
2112
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2113
    verbose = self.op.verbose
2114
    self._feedback_fn = feedback_fn
2115
    feedback_fn("* Verifying global settings")
2116
    for msg in self.cfg.VerifyConfig():
2117
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)
2118

    
2119
    # Check the cluster certificates
2120
    for cert_filename in constants.ALL_CERT_FILES:
2121
      (errcode, msg) = _VerifyCertificate(cert_filename)
2122
      _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
2123

    
2124
    vg_name = self.cfg.GetVGName()
2125
    drbd_helper = self.cfg.GetDRBDHelper()
2126
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
2127
    cluster = self.cfg.GetClusterInfo()
2128
    nodeinfo_byname = self.cfg.GetAllNodesInfo()
2129
    nodelist = utils.NiceSort(nodeinfo_byname.keys())
2130
    nodeinfo = [nodeinfo_byname[nname] for nname in nodelist]
2131
    instanceinfo = self.cfg.GetAllInstancesInfo()
2132
    instancelist = utils.NiceSort(instanceinfo.keys())
2133
    groupinfo = self.cfg.GetAllNodeGroupsInfo()
2134
    i_non_redundant = [] # Non redundant instances
2135
    i_non_a_balanced = [] # Non auto-balanced instances
2136
    n_offline = 0 # Count of offline nodes
2137
    n_drained = 0 # Count of nodes being drained
2138
    node_vol_should = {}
2139

    
2140
    # FIXME: verify OS list
2141
    # do local checksums
2142
    master_files = [constants.CLUSTER_CONF_FILE]
2143
    master_node = self.master_node = self.cfg.GetMasterNode()
2144
    master_ip = self.cfg.GetMasterIP()
2145

    
2146
    file_names = ssconf.SimpleStore().GetFileList()
2147
    file_names.extend(constants.ALL_CERT_FILES)
2148
    file_names.extend(master_files)
2149
    if cluster.modify_etc_hosts:
2150
      file_names.append(constants.ETC_HOSTS)
2151

    
2152
    local_checksums = utils.FingerprintFiles(file_names)
2153

    
2154
    # Compute the set of hypervisor parameters
2155
    hvp_data = []
2156
    for hv_name in hypervisors:
2157
      hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
2158
    for os_name, os_hvp in cluster.os_hvp.items():
2159
      for hv_name, hv_params in os_hvp.items():
2160
        if not hv_params:
2161
          continue
2162
        full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
2163
        hvp_data.append(("os %s" % os_name, hv_name, full_params))
2164
    # TODO: collapse identical parameter values in a single one
2165
    for instance in instanceinfo.values():
2166
      if not instance.hvparams:
2167
        continue
2168
      hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
2169
                       cluster.FillHV(instance)))
2170
    # and verify them locally
2171
    self._VerifyHVP(hvp_data)
2172

    
2173
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
2174
    node_verify_param = {
2175
      constants.NV_FILELIST: file_names,
2176
      constants.NV_NODELIST: [node.name for node in nodeinfo
2177
                              if not node.offline],
2178
      constants.NV_HYPERVISOR: hypervisors,
2179
      constants.NV_HVPARAMS: hvp_data,
2180
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
2181
                                  node.secondary_ip) for node in nodeinfo
2182
                                 if not node.offline],
2183
      constants.NV_INSTANCELIST: hypervisors,
2184
      constants.NV_VERSION: None,
2185
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
2186
      constants.NV_NODESETUP: None,
2187
      constants.NV_TIME: None,
2188
      constants.NV_MASTERIP: (master_node, master_ip),
2189
      constants.NV_OSLIST: None,
2190
      constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
2191
      }
2192

    
2193
    if vg_name is not None:
2194
      node_verify_param[constants.NV_VGLIST] = None
2195
      node_verify_param[constants.NV_LVLIST] = vg_name
2196
      node_verify_param[constants.NV_PVLIST] = [vg_name]
2197
      node_verify_param[constants.NV_DRBDLIST] = None
2198

    
2199
    if drbd_helper:
2200
      node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
2201

    
2202
    # bridge checks
2203
    # FIXME: this needs to be changed per node-group, not cluster-wide
2204
    bridges = set()
2205
    default_nicpp = cluster.nicparams[constants.PP_DEFAULT]
2206
    if default_nicpp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
2207
      bridges.add(default_nicpp[constants.NIC_LINK])
2208
    for instance in instanceinfo.values():
2209
      for nic in instance.nics:
2210
        full_nic = cluster.SimpleFillNIC(nic.nicparams)
2211
        if full_nic[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
2212
          bridges.add(full_nic[constants.NIC_LINK])
2213

    
2214
    if bridges:
2215
      node_verify_param[constants.NV_BRIDGES] = list(bridges)
2216

    
2217
    # Build our expected cluster state
2218
    node_image = dict((node.name, self.NodeImage(offline=node.offline,
2219
                                                 name=node.name,
2220
                                                 vm_capable=node.vm_capable))
2221
                      for node in nodeinfo)
2222

    
2223
    # Gather OOB paths
2224
    oob_paths = []
2225
    for node in nodeinfo:
2226
      path = _SupportsOob(self.cfg, node)
2227
      if path and path not in oob_paths:
2228
        oob_paths.append(path)
2229

    
2230
    if oob_paths:
2231
      node_verify_param[constants.NV_OOB_PATHS] = oob_paths
2232

    
2233
    for instance in instancelist:
2234
      inst_config = instanceinfo[instance]
2235

    
2236
      for nname in inst_config.all_nodes:
2237
        if nname not in node_image:
2238
          # ghost node
2239
          gnode = self.NodeImage(name=nname)
2240
          gnode.ghost = True
2241
          node_image[nname] = gnode
2242

    
2243
      inst_config.MapLVsByNode(node_vol_should)
2244

    
2245
      pnode = inst_config.primary_node
2246
      node_image[pnode].pinst.append(instance)
2247

    
2248
      for snode in inst_config.secondary_nodes:
2249
        nimg = node_image[snode]
2250
        nimg.sinst.append(instance)
2251
        if pnode not in nimg.sbp:
2252
          nimg.sbp[pnode] = []
2253
        nimg.sbp[pnode].append(instance)
2254

    
2255
    # At this point, we have the in-memory data structures complete,
2256
    # except for the runtime information, which we'll gather next
2257

    
2258
    # Due to the way our RPC system works, exact response times cannot be
2259
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
2260
    # time before and after executing the request, we can at least have a time
2261
    # window.
2262
    nvinfo_starttime = time.time()
2263
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
2264
                                           self.cfg.GetClusterName())
2265
    nvinfo_endtime = time.time()
2266

    
2267
    all_drbd_map = self.cfg.ComputeDRBDMap()
2268

    
2269
    feedback_fn("* Gathering disk information (%s nodes)" % len(nodelist))
2270
    instdisk = self._CollectDiskInfo(nodelist, node_image, instanceinfo)
2271

    
2272
    feedback_fn("* Verifying node status")
2273

    
2274
    refos_img = None
2275

    
2276
    for node_i in nodeinfo:
2277
      node = node_i.name
2278
      nimg = node_image[node]
2279

    
2280
      if node_i.offline:
2281
        if verbose:
2282
          feedback_fn("* Skipping offline node %s" % (node,))
2283
        n_offline += 1
2284
        continue
2285

    
2286
      if node == master_node:
2287
        ntype = "master"
2288
      elif node_i.master_candidate:
2289
        ntype = "master candidate"
2290
      elif node_i.drained:
2291
        ntype = "drained"
2292
        n_drained += 1
2293
      else:
2294
        ntype = "regular"
2295
      if verbose:
2296
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))
2297

    
2298
      msg = all_nvinfo[node].fail_msg
2299
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
2300
      if msg:
2301
        nimg.rpc_fail = True
2302
        continue
2303

    
2304
      nresult = all_nvinfo[node].payload
2305

    
2306
      nimg.call_ok = self._VerifyNode(node_i, nresult)
2307
      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
2308
      self._VerifyNodeNetwork(node_i, nresult)
2309
      self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
2310
                            master_files)
2311

    
2312
      self._VerifyOob(node_i, nresult)
2313

    
2314
      if nimg.vm_capable:
2315
        self._VerifyNodeLVM(node_i, nresult, vg_name)
2316
        self._VerifyNodeDrbd(node_i, nresult, instanceinfo, drbd_helper,
2317
                             all_drbd_map)
2318

    
2319
        self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
2320
        self._UpdateNodeInstances(node_i, nresult, nimg)
2321
        self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
2322
        self._UpdateNodeOS(node_i, nresult, nimg)
2323
        if not nimg.os_fail:
2324
          if refos_img is None:
2325
            refos_img = nimg
2326
          self._VerifyNodeOS(node_i, nimg, refos_img)
2327
        self._VerifyNodeBridges(node_i, nresult, bridges)
2328

    
2329
    feedback_fn("* Verifying instance status")
2330
    for instance in instancelist:
2331
      if verbose:
2332
        feedback_fn("* Verifying instance %s" % instance)
2333
      inst_config = instanceinfo[instance]
2334
      self._VerifyInstance(instance, inst_config, node_image,
2335
                           instdisk[instance])
2336
      inst_nodes_offline = []
2337

    
2338
      pnode = inst_config.primary_node
2339
      pnode_img = node_image[pnode]
2340
      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
2341
               self.ENODERPC, pnode, "instance %s, connection to"
2342
               " primary node failed", instance)
2343

    
2344
      _ErrorIf(pnode_img.offline, self.EINSTANCEBADNODE, instance,
2345
               "instance lives on offline node %s", inst_config.primary_node)
2346

    
2347
      # If the instance is non-redundant we cannot survive losing its primary
2348
      # node, so we are not N+1 compliant. On the other hand we have no disk
2349
      # templates with more than one secondary so that situation is not well
2350
      # supported either.
2351
      # FIXME: does not support file-backed instances
2352
      if not inst_config.secondary_nodes:
2353
        i_non_redundant.append(instance)
2354

    
2355
      _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
2356
               instance, "instance has multiple secondary nodes: %s",
2357
               utils.CommaJoin(inst_config.secondary_nodes),
2358
               code=self.ETYPE_WARNING)
2359

    
2360
      if inst_config.disk_template in constants.DTS_NET_MIRROR:
2361
        pnode = inst_config.primary_node
2362
        instance_nodes = utils.NiceSort(inst_config.all_nodes)
2363
        instance_groups = {}
2364

    
2365
        for node in instance_nodes:
2366
          instance_groups.setdefault(nodeinfo_byname[node].group,
2367
                                     []).append(node)
2368

    
2369
        pretty_list = [
2370
          "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
2371
          # Sort so that we always list the primary node first.
2372
          for group, nodes in sorted(instance_groups.items(),
2373
                                     key=lambda (_, nodes): pnode in nodes,
2374
                                     reverse=True)]
2375

    
2376
        self._ErrorIf(len(instance_groups) > 1, self.EINSTANCESPLITGROUPS,
2377
                      instance, "instance has primary and secondary nodes in"
2378
                      " different groups: %s", utils.CommaJoin(pretty_list),
2379
                      code=self.ETYPE_WARNING)
2380

    
2381
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
2382
        i_non_a_balanced.append(instance)
2383

    
2384
      for snode in inst_config.secondary_nodes:
2385
        s_img = node_image[snode]
2386
        _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
2387
                 "instance %s, connection to secondary node failed", instance)
2388

    
2389
        if s_img.offline:
2390
          inst_nodes_offline.append(snode)
2391

    
2392
      # warn that the instance lives on offline nodes
2393
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
2394
               "instance has offline secondary node(s) %s",
2395
               utils.CommaJoin(inst_nodes_offline))
2396
      # ... or ghost/non-vm_capable nodes
2397
      for node in inst_config.all_nodes:
2398
        _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
2399
                 "instance lives on ghost node %s", node)
2400
        _ErrorIf(not node_image[node].vm_capable, self.EINSTANCEBADNODE,
2401
                 instance, "instance lives on non-vm_capable node %s", node)
2402

    
2403
    feedback_fn("* Verifying orphan volumes")
2404
    reserved = utils.FieldSet(*cluster.reserved_lvs)
2405
    self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
2406

    
2407
    feedback_fn("* Verifying orphan instances")
2408
    self._VerifyOrphanInstances(instancelist, node_image)
2409

    
2410
    if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
2411
      feedback_fn("* Verifying N+1 Memory redundancy")
2412
      self._VerifyNPlusOneMemory(node_image, instanceinfo)
2413

    
2414
    feedback_fn("* Other Notes")
2415
    if i_non_redundant:
2416
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
2417
                  % len(i_non_redundant))
2418

    
2419
    if i_non_a_balanced:
2420
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
2421
                  % len(i_non_a_balanced))
2422

    
2423
    if n_offline:
2424
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)
2425

    
2426
    if n_drained:
2427
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)
2428

    
2429
    return not self.bad
2430

    
2431
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
2432
    """Analyze the post-hooks' result
2433

2434
    This method analyses the hook result, handles it, and sends some
2435
    nicely-formatted feedback back to the user.
2436

2437
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
2438
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
2439
    @param hooks_results: the results of the multi-node hooks rpc call
2440
    @param feedback_fn: function used to send feedback back to the caller
2441
    @param lu_result: previous Exec result
2442
    @return: the new Exec result, based on the previous result
2443
        and hook results
2444

2445
    """
2446
    # We only really run POST phase hooks, and are only interested in
2447
    # their results
2448
    if phase == constants.HOOKS_PHASE_POST:
2449
      # Used to change hooks' output to proper indentation
2450
      feedback_fn("* Hooks Results")
2451
      assert hooks_results, "invalid result from hooks"
2452

    
2453
      for node_name in hooks_results:
2454
        res = hooks_results[node_name]
2455
        msg = res.fail_msg
2456
        test = msg and not res.offline
2457
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
2458
                      "Communication failure in hooks execution: %s", msg)
2459
        if res.offline or msg:
2460
          # No need to investigate payload if node is offline or gave an error.
2461
          # override manually lu_result here as _ErrorIf only
2462
          # overrides self.bad
2463
          lu_result = 1
2464
          continue
2465
        for script, hkr, output in res.payload:
2466
          test = hkr == constants.HKR_FAIL
2467
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
2468
                        "Script %s failed, output:", script)
2469
          if test:
2470
            output = self._HOOKS_INDENT_RE.sub('      ', output)
2471
            feedback_fn("%s" % output)
2472
            lu_result = 0
2473

    
2474
      return lu_result
2475

    
2476

    
2477
class LUClusterVerifyDisks(NoHooksLU):
2478
  """Verifies the cluster disks status.
2479

2480
  """
2481
  REQ_BGL = False
2482

    
2483
  def ExpandNames(self):
2484
    self.needed_locks = {
2485
      locking.LEVEL_NODE: locking.ALL_SET,
2486
      locking.LEVEL_INSTANCE: locking.ALL_SET,
2487
    }
2488
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
2489

    
2490
  def Exec(self, feedback_fn):
2491
    """Verify integrity of cluster disks.
2492

2493
    @rtype: tuple of three items
2494
    @return: a tuple of (dict of node-to-node_error, list of instances
2495
        which need activate-disks, dict of instance: (node, volume) for
2496
        missing volumes)
2497

2498
    """
2499
    result = res_nodes, res_instances, res_missing = {}, [], {}
2500

    
2501
    nodes = utils.NiceSort(self.cfg.GetVmCapableNodeList())
2502
    instances = self.cfg.GetAllInstancesInfo().values()
2503

    
2504
    nv_dict = {}
2505
    for inst in instances:
2506
      inst_lvs = {}
2507
      if not inst.admin_up:
2508
        continue
2509
      inst.MapLVsByNode(inst_lvs)
2510
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
2511
      for node, vol_list in inst_lvs.iteritems():
2512
        for vol in vol_list:
2513
          nv_dict[(node, vol)] = inst
2514

    
2515
    if not nv_dict:
2516
      return result
2517

    
2518
    node_lvs = self.rpc.call_lv_list(nodes, [])
2519
    for node, node_res in node_lvs.items():
2520
      if node_res.offline:
2521
        continue
2522
      msg = node_res.fail_msg
2523
      if msg:
2524
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
2525
        res_nodes[node] = msg
2526
        continue
2527

    
2528
      lvs = node_res.payload
2529
      for lv_name, (_, _, lv_online) in lvs.items():
2530
        inst = nv_dict.pop((node, lv_name), None)
2531
        if (not lv_online and inst is not None
2532
            and inst.name not in res_instances):
2533
          res_instances.append(inst.name)
2534

    
2535
    # any leftover items in nv_dict are missing LVs, let's arrange the
2536
    # data better
2537
    for key, inst in nv_dict.iteritems():
2538
      if inst.name not in res_missing:
2539
        res_missing[inst.name] = []
2540
      res_missing[inst.name].append(key)
2541

    
2542
    return result
2543

    
2544

    
2545
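# Illustrative sketch (not part of the LU above), with hypothetical LV names:
# LUClusterVerifyDisks flattens the per-instance LV map into a
# {(node, volume): instance} dictionary, pops off every LV a node reports,
# and whatever is left at the end is missing.
#
#   >>> inst_lvs = {"node1": ["xenvg/lv1", "xenvg/lv2"]}
#   >>> nv_dict = dict(((node, vol), "inst1")
#   ...                for node, vols in inst_lvs.items() for vol in vols)
#   >>> nv_dict.pop(("node1", "xenvg/lv1"), None)
#   'inst1'
#   >>> sorted(nv_dict)
#   [('node1', 'xenvg/lv2')]
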
class LUClusterRepairDiskSizes(NoHooksLU):
2546
  """Verifies the cluster disks sizes.
2547

2548
  """
2549
  REQ_BGL = False
2550

    
2551
  def ExpandNames(self):
2552
    if self.op.instances:
2553
      self.wanted_names = []
2554
      for name in self.op.instances:
2555
        full_name = _ExpandInstanceName(self.cfg, name)
2556
        self.wanted_names.append(full_name)
2557
      self.needed_locks = {
2558
        locking.LEVEL_NODE: [],
2559
        locking.LEVEL_INSTANCE: self.wanted_names,
2560
        }
2561
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2562
    else:
2563
      self.wanted_names = None
2564
      self.needed_locks = {
2565
        locking.LEVEL_NODE: locking.ALL_SET,
2566
        locking.LEVEL_INSTANCE: locking.ALL_SET,
2567
        }
2568
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
2569

    
2570
  def DeclareLocks(self, level):
2571
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
2572
      self._LockInstancesNodes(primary_only=True)
2573

    
2574
  def CheckPrereq(self):
2575
    """Check prerequisites.
2576

2577
    This only checks the optional instance list against the existing names.
2578

2579
    """
2580
    if self.wanted_names is None:
2581
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2582

    
2583
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
2584
                             in self.wanted_names]
2585

    
2586
  def _EnsureChildSizes(self, disk):
2587
    """Ensure children of the disk have the needed disk size.
2588

2589
    This is valid mainly for DRBD8 and fixes an issue where the
2590
    children have smaller disk size.
2591

2592
    @param disk: an L{ganeti.objects.Disk} object
2593

2594
    """
2595
    if disk.dev_type == constants.LD_DRBD8:
2596
      assert disk.children, "Empty children for DRBD8?"
2597
      fchild = disk.children[0]
2598
      mismatch = fchild.size < disk.size
2599
      if mismatch:
2600
        self.LogInfo("Child disk has size %d, parent %d, fixing",
2601
                     fchild.size, disk.size)
2602
        fchild.size = disk.size
2603

    
2604
      # and we recurse on this child only, not on the metadev
2605
      return self._EnsureChildSizes(fchild) or mismatch
2606
    else:
2607
      return False
2608

    
2609
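  # Illustrative sketch (not part of the LU): _EnsureChildSizes only recurses
  # into the data child of a DRBD8 disk and reports whether anything was
  # grown.  A toy version over plain dictionaries, assuming only the fields
  # used here:
  #
  #   >>> disk = {"type": "drbd8", "size": 1024,
  #   ...         "children": [{"type": "lvm", "size": 1000, "children": []}]}
  #   >>> def ensure(d):
  #   ...   if d["type"] != "drbd8":
  #   ...     return False
  #   ...   child = d["children"][0]
  #   ...   grown = child["size"] < d["size"]
  #   ...   if grown:
  #   ...     child["size"] = d["size"]
  #   ...   return ensure(child) or grown
  #   >>> ensure(disk), disk["children"][0]["size"]
  #   (True, 1024)
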
  def Exec(self, feedback_fn):
2610
    """Verify the size of cluster disks.
2611

2612
    """
2613
    # TODO: check child disks too
2614
    # TODO: check differences in size between primary/secondary nodes
2615
    per_node_disks = {}
2616
    for instance in self.wanted_instances:
2617
      pnode = instance.primary_node
2618
      if pnode not in per_node_disks:
2619
        per_node_disks[pnode] = []
2620
      for idx, disk in enumerate(instance.disks):
2621
        per_node_disks[pnode].append((instance, idx, disk))
2622

    
2623
    changed = []
2624
    for node, dskl in per_node_disks.items():
2625
      newl = [v[2].Copy() for v in dskl]
2626
      for dsk in newl:
2627
        self.cfg.SetDiskID(dsk, node)
2628
      result = self.rpc.call_blockdev_getsize(node, newl)
2629
      if result.fail_msg:
2630
        self.LogWarning("Failure in blockdev_getsize call to node"
2631
                        " %s, ignoring", node)
2632
        continue
2633
      if len(result.payload) != len(dskl):
2634
        logging.warning("Invalid result from node %s: len(dksl)=%d,"
2635
                        " result.payload=%s", node, len(dskl), result.payload)
2636
        self.LogWarning("Invalid result from node %s, ignoring node results",
2637
                        node)
2638
        continue
2639
      for ((instance, idx, disk), size) in zip(dskl, result.payload):
2640
        if size is None:
2641
          self.LogWarning("Disk %d of instance %s did not return size"
2642
                          " information, ignoring", idx, instance.name)
2643
          continue
2644
        if not isinstance(size, (int, long)):
2645
          self.LogWarning("Disk %d of instance %s did not return valid"
2646
                          " size information, ignoring", idx, instance.name)
2647
          continue
2648
        size = size >> 20
2649
        if size != disk.size:
2650
          self.LogInfo("Disk %d of instance %s has mismatched size,"
2651
                       " correcting: recorded %d, actual %d", idx,
2652
                       instance.name, disk.size, size)
2653
          disk.size = size
2654
          self.cfg.Update(instance, feedback_fn)
2655
          changed.append((instance.name, idx, size))
2656
        if self._EnsureChildSizes(disk):
2657
          self.cfg.Update(instance, feedback_fn)
2658
          changed.append((instance.name, idx, disk.size))
2659
    return changed
2660

    
2661

    
2662
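# Illustrative sketch (not part of the LU above): blockdev_getsize reports
# sizes in bytes while the configuration stores MiB, hence the ">> 20"
# conversion before comparing with disk.size.
#
#   >>> size_bytes = 10737418240       # hypothetical 10 GiB disk
#   >>> size_bytes >> 20
#   10240
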
class LUClusterRename(LogicalUnit):
2663
  """Rename the cluster.
2664

2665
  """
2666
  HPATH = "cluster-rename"
2667
  HTYPE = constants.HTYPE_CLUSTER
2668

    
2669
  def BuildHooksEnv(self):
2670
    """Build hooks env.
2671

2672
    """
2673
    env = {
2674
      "OP_TARGET": self.cfg.GetClusterName(),
2675
      "NEW_NAME": self.op.name,
2676
      }
2677
    mn = self.cfg.GetMasterNode()
2678
    all_nodes = self.cfg.GetNodeList()
2679
    return env, [mn], all_nodes
2680

    
2681
  def CheckPrereq(self):
2682
    """Verify that the passed name is a valid one.
2683

2684
    """
2685
    hostname = netutils.GetHostname(name=self.op.name,
2686
                                    family=self.cfg.GetPrimaryIPFamily())
2687

    
2688
    new_name = hostname.name
2689
    self.ip = new_ip = hostname.ip
2690
    old_name = self.cfg.GetClusterName()
2691
    old_ip = self.cfg.GetMasterIP()
2692
    if new_name == old_name and new_ip == old_ip:
2693
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
2694
                                 " cluster has changed",
2695
                                 errors.ECODE_INVAL)
2696
    if new_ip != old_ip:
2697
      if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
2698
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
2699
                                   " reachable on the network" %
2700
                                   new_ip, errors.ECODE_NOTUNIQUE)
2701

    
2702
    self.op.name = new_name
2703

    
2704
  def Exec(self, feedback_fn):
2705
    """Rename the cluster.
2706

2707
    """
2708
    clustername = self.op.name
2709
    ip = self.ip
2710

    
2711
    # shutdown the master IP
2712
    master = self.cfg.GetMasterNode()
2713
    result = self.rpc.call_node_stop_master(master, False)
2714
    result.Raise("Could not disable the master role")
2715

    
2716
    try:
2717
      cluster = self.cfg.GetClusterInfo()
2718
      cluster.cluster_name = clustername
2719
      cluster.master_ip = ip
2720
      self.cfg.Update(cluster, feedback_fn)
2721

    
2722
      # update the known hosts file
2723
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
2724
      node_list = self.cfg.GetOnlineNodeList()
2725
      try:
2726
        node_list.remove(master)
2727
      except ValueError:
2728
        pass
2729
      _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
2730
    finally:
2731
      result = self.rpc.call_node_start_master(master, False, False)
2732
      msg = result.fail_msg
2733
      if msg:
2734
        self.LogWarning("Could not re-enable the master role on"
2735
                        " the master, please restart manually: %s", msg)
2736

    
2737
    return clustername
2738

    
2739

    
2740
class LUClusterSetParams(LogicalUnit):
2741
  """Change the parameters of the cluster.
2742

2743
  """
2744
  HPATH = "cluster-modify"
2745
  HTYPE = constants.HTYPE_CLUSTER
2746
  REQ_BGL = False
2747

    
2748
  def CheckArguments(self):
2749
    """Check parameters
2750

2751
    """
2752
    if self.op.uid_pool:
2753
      uidpool.CheckUidPool(self.op.uid_pool)
2754

    
2755
    if self.op.add_uids:
2756
      uidpool.CheckUidPool(self.op.add_uids)
2757

    
2758
    if self.op.remove_uids:
2759
      uidpool.CheckUidPool(self.op.remove_uids)
2760

    
2761
  def ExpandNames(self):
2762
    # FIXME: in the future maybe other cluster params won't require checking on
2763
    # all nodes to be modified.
2764
    self.needed_locks = {
2765
      locking.LEVEL_NODE: locking.ALL_SET,
2766
    }
2767
    self.share_locks[locking.LEVEL_NODE] = 1
2768

    
2769
  def BuildHooksEnv(self):
2770
    """Build hooks env.
2771

2772
    """
2773
    env = {
2774
      "OP_TARGET": self.cfg.GetClusterName(),
2775
      "NEW_VG_NAME": self.op.vg_name,
2776
      }
2777
    mn = self.cfg.GetMasterNode()
2778
    return env, [mn], [mn]
2779

    
2780
  def CheckPrereq(self):
2781
    """Check prerequisites.
2782

2783
    This checks whether the given params don't conflict and
2784
    if the given volume group is valid.
2785

2786
    """
    if self.op.vg_name is not None and not self.op.vg_name:
      if self.cfg.HasAnyDiskOfType(constants.LD_LV):
        raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
                                   " instances exist", errors.ECODE_INVAL)

    if self.op.drbd_helper is not None and not self.op.drbd_helper:
      if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
        raise errors.OpPrereqError("Cannot disable drbd helper while"
                                   " drbd-based instances exist",
                                   errors.ECODE_INVAL)

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        msg = vglist[node].fail_msg
        if msg:
          # ignoring down node
          self.LogWarning("Error while gathering data on node %s"
                          " (ignoring node): %s", node, msg)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus), errors.ECODE_ENVIRON)

    if self.op.drbd_helper:
      # checks given drbd helper on all nodes
      helpers = self.rpc.call_drbd_helper(node_list)
      for node in node_list:
        ninfo = self.cfg.GetNodeInfo(node)
        if ninfo.offline:
          self.LogInfo("Not checking drbd helper on offline node %s", node)
          continue
        msg = helpers[node].fail_msg
        if msg:
          raise errors.OpPrereqError("Error checking drbd helper on node"
                                     " '%s': %s" % (node, msg),
                                     errors.ECODE_ENVIRON)
        node_helper = helpers[node].payload
        if node_helper != self.op.drbd_helper:
          raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
                                     (node, node_helper), errors.ECODE_ENVIRON)

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate params changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      self.new_beparams = cluster.SimpleFillBE(self.op.beparams)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)

      # TODO: we need a more general way to handle resetting
      # cluster-level parameters to default values
      if self.new_ndparams["oob_program"] == "":
        self.new_ndparams["oob_program"] = \
            constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]

    if self.op.nicparams:
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
      self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
      nic_errors = []

      # check all instances for consistency
      for instance in self.cfg.GetAllInstancesInfo().values():
        for nic_idx, nic in enumerate(instance.nics):
          params_copy = copy.deepcopy(nic.nicparams)
          params_filled = objects.FillDict(self.new_nicparams, params_copy)

          # check parameter syntax
          try:
            objects.NIC.CheckParameterSyntax(params_filled)
          except errors.ConfigurationError, err:
            nic_errors.append("Instance %s, nic/%d: %s" %
                              (instance.name, nic_idx, err))

          # if we're moving instances to routed, check that they have an ip
          target_mode = params_filled[constants.NIC_MODE]
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
            nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
                              " address" % (instance.name, nic_idx))
      if nic_errors:
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
                                   "\n".join(nic_errors))

    # hypervisor list/parameters
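    # start from a copy of the current cluster-level hvparams and merge the
    # requested changes on top of it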
    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    # os hypervisor parameters
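    # per-OS hypervisor overrides are merged the same way, OS by OS and
    # hypervisor by hypervisor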
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
    if self.op.os_hvp:
      for os_name, hvs in self.op.os_hvp.items():
        if os_name not in self.new_os_hvp:
          self.new_os_hvp[os_name] = hvs
        else:
          for hv_name, hv_dict in hvs.items():
            if hv_name not in self.new_os_hvp[os_name]:
              self.new_os_hvp[os_name][hv_name] = hv_dict
            else:
              self.new_os_hvp[os_name][hv_name].update(hv_dict)

    # os parameters
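    # merge the per-OS parameters; use_none=True below means a key can be
    # removed by passing None as its value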
    self.new_osp = objects.FillDict(cluster.osparams, {})
    if self.op.osparams:
      for os_name, osp in self.op.osparams.items():
        if os_name not in self.new_osp:
          self.new_osp[os_name] = {}

        self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
                                                  use_none=True)

        if not self.new_osp[os_name]:
          # we removed all parameters
          del self.new_osp[os_name]
        else:
          # check the parameter validity (remote check)
          _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
                         os_name, self.new_osp[os_name])

    # changes to the hypervisor list
    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
      for hv in self.hv_list:
        # if the hypervisor doesn't already exist in the cluster
        # hvparams, we initialize it to empty, and then (in both
        # cases) we make sure to fill the defaults, as we might not
        # have a complete defaults list if the hypervisor wasn't
        # enabled before
        if hv not in new_hvp:
          new_hvp[hv] = {}
        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

    if self.op.os_hvp:
      # no need to check any newly-enabled hypervisors, since the
      # defaults have already been checked in the above code-block
      for os_name, os_hvp in self.new_os_hvp.items():
        for hv_name, hv_params in os_hvp.items():
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          # we need to fill in the new os_hvp on top of the actual hv_p
          cluster_defaults = self.new_hvparams.get(hv_name, {})
          new_osp = objects.FillDict(cluster_defaults, hv_params)
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(new_osp)
          _CheckHVParams(self, node_list, hv_name, new_osp)

    if self.op.default_iallocator:
      alloc_script = utils.FindFile(self.op.default_iallocator,
                                    constants.IALLOCATOR_SEARCH_PATH,
                                    os.path.isfile)
      if alloc_script is None:
        raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                   " specified" % self.op.default_iallocator,
                                   errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      new_volume = self.op.vg_name
      if not new_volume:
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.drbd_helper is not None:
      new_helper = self.op.drbd_helper
      if not new_helper:
        new_helper = None
      if new_helper != self.cfg.GetDRBDHelper():
        self.cfg.SetDRBDHelper(new_helper)
      else:
        feedback_fn("Cluster DRBD helper already in desired state,"
                    " not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.os_hvp:
      self.cluster.os_hvp = self.new_os_hvp
    if self.op.enabled_hypervisors is not None:
      self.cluster.hvparams = self.new_hvparams
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
    if self.op.nicparams:
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
    if self.op.osparams:
      self.cluster.osparams = self.new_osp
    if self.op.ndparams:
      self.cluster.ndparams = self.new_ndparams

    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
      # we need to update the pool size here, otherwise the save will fail
      _AdjustCandidatePool(self, [])

    if self.op.maintain_node_health is not None:
      self.cluster.maintain_node_health = self.op.maintain_node_health

    if self.op.prealloc_wipe_disks is not None:
      self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks

    if self.op.add_uids is not None:
      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)

    if self.op.remove_uids is not None:
      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)

    if self.op.uid_pool is not None:
      self.cluster.uid_pool = self.op.uid_pool

    if self.op.default_iallocator is not None:
      self.cluster.default_iallocator = self.op.default_iallocator

    if self.op.reserved_lvs is not None:
      self.cluster.reserved_lvs = self.op.reserved_lvs

    def helper_os(aname, mods, desc):
      desc += " OS list"
      lst = getattr(self.cluster, aname)
      for key, val in mods:
        if key == constants.DDM_ADD:
          if val in lst:
            feedback_fn("OS %s already in %s, ignoring" % (val, desc))
          else:
            lst.append(val)
        elif key == constants.DDM_REMOVE:
          if val in lst:
            lst.remove(val)
          else:
            feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
        else:
          raise errors.ProgrammerError("Invalid modification '%s'" % key)

    if self.op.hidden_os:
      helper_os("hidden_os", self.op.hidden_os, "hidden")

    if self.op.blacklisted_os:
      helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")

    if self.op.master_netdev:
      master = self.cfg.GetMasterNode()
      feedback_fn("Shutting down master ip on the current netdev (%s)" %
                  self.cluster.master_netdev)
      result = self.rpc.call_node_stop_master(master, False)
      result.Raise("Could not disable the master ip")
      feedback_fn("Changing master_netdev from %s to %s" %
                  (self.cluster.master_netdev, self.op.master_netdev))
      self.cluster.master_netdev = self.op.master_netdev

    self.cfg.Update(self.cluster, feedback_fn)

    if self.op.master_netdev:
      feedback_fn("Starting the master ip on the new master netdev (%s)" %
                  self.op.master_netdev)
      result = self.rpc.call_node_start_master(master, False, False)
      if result.fail_msg:
        self.LogWarning("Could not re-enable the master ip on"
                        " the master, please restart manually: %s",
                        result.fail_msg)


def _UploadHelper(lu, nodes, fname):
  """Helper for uploading a file and showing warnings.

  """
  if os.path.exists(fname):
    result = lu.rpc.call_upload_file(nodes, fname)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (fname, to_node, msg))
        lu.proc.LogWarning(msg)


def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  @param lu: calling logical unit
  @param additional_nodes: list of nodes not in the config to distribute to
  @type additional_vm: boolean
  @param additional_vm: whether the additional nodes are vm-capable or not

  """
  # 1. Gather target nodes
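  # only online nodes are targeted; the master itself is dropped from both
  # lists below since it is the source of the distributed files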
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
  dist_nodes = lu.cfg.GetOnlineNodeList()
  nvm_nodes = lu.cfg.GetNonVmCapableNodeList()
  vm_nodes = [name for name in dist_nodes if name not in nvm_nodes]
  if additional_nodes is not None:
    dist_nodes.extend(additional_nodes)
    if additional_vm:
      vm_nodes.extend(additional_nodes)
  if myself.name in dist_nodes:
    dist_nodes.remove(myself.name)
  if myself.name in vm_nodes:
    vm_nodes.remove(myself.name)

  # 2. Gather files to distribute
  dist_files = set([constants.ETC_HOSTS,
                    constants.SSH_KNOWN_HOSTS_FILE,
                    constants.RAPI_CERT_FILE,
                    constants.RAPI_USERS_FILE,
                    constants.CONFD_HMAC_KEY,
                    constants.CLUSTER_DOMAIN_SECRET_FILE,
                   ])

  vm_files = set()
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
  for hv_name in enabled_hypervisors:
    hv_class = hypervisor.GetHypervisor(hv_name)
    vm_files.update(hv_class.GetAncillaryFiles())

  # 3. Perform the files upload
  for fname in dist_files:
    _UploadHelper(lu, dist_nodes, fname)
  for fname in vm_files:
    _UploadHelper(lu, vm_nodes, fname)


class LUClusterRedistConf(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
    _RedistributeAncillaryFiles(self)


def _WaitForSync(lu, instance, disks=None, oneshot=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks or disks is not None and not disks:
    return True

  disks = _ExpandCheckDisks(instance, disks)

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in disks:
    lu.cfg.SetDiskID(dev, node)

  # TODO: Convert to utils.Retry

  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.payload
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                           node, disks[i].iv_name)
        continue

      cumul_degraded = (cumul_degraded or
                        (mstat.is_degraded and mstat.sync_percent is None))
      if mstat.sync_percent is not None:
        done = False
        if mstat.estimated_time is not None:
          rem_time = ("%s remaining (estimated)" %
                      utils.FormatSeconds(mstat.estimated_time))
          max_time = mstat.estimated_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (disks[i].iv_name, mstat.sync_percent, rem_time))

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)

  result = True

  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      if ldisk:
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
      else:
        result = result and not rstats.payload.is_degraded

  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


class LUOobCommand(NoHooksLU):
  """Logical unit for OOB handling.

  """
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - OOB is supported

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.nodes = []
    for node_name in self.op.node_names:
      node = self.cfg.GetNodeInfo(node_name)

      if node is None:
        raise errors.OpPrereqError("Node %s not found" % node_name,
                                   errors.ECODE_NOENT)
      else:
        self.nodes.append(node)

      if (self.op.command == constants.OOB_POWER_OFF and not node.offline):
        raise errors.OpPrereqError(("Cannot power off node %s because it is"
                                    " not marked offline") % node_name,
                                   errors.ECODE_STATE)

  def ExpandNames(self):
    """Gather locks we need.

    """
    if self.op.node_names:
      self.op.node_names = [_ExpandNodeName(self.cfg, name)
                            for name in self.op.node_names]
    else:
      self.op.node_names = self.cfg.GetNodeList()

    self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_names,
      }

  def Exec(self, feedback_fn):
    """Execute OOB and return result if we expect any.

    """
    master_node = self.cfg.GetMasterNode()
    ret = []

    for node in self.nodes:
      node_entry = [(constants.RS_NORMAL, node.name)]
      ret.append(node_entry)

      oob_program = _SupportsOob(self.cfg, node)

      if not oob_program:
        node_entry.append((constants.RS_UNAVAIL, None))
        continue

      logging.info("Executing out-of-band command '%s' using '%s' on %s",
                   self.op.command, oob_program, node.name)
      result = self.rpc.call_run_oob(master_node, oob_program,
                                     self.op.command, node.name,
                                     self.op.timeout)

      if result.fail_msg:
        self.LogWarning("On node '%s' out-of-band RPC failed with: %s",
                        node.name, result.fail_msg)
        node_entry.append((constants.RS_NODATA, None))
      else:
        try:
          self._CheckPayload(result)
        except errors.OpExecError, err:
          self.LogWarning("The payload returned by '%s' is not valid: %s",
                          node.name, err)
          node_entry.append((constants.RS_NODATA, None))
        else:
          if self.op.command == constants.OOB_HEALTH:
            # For health we should log important events
            for item, status in result.payload:
              if status in [constants.OOB_STATUS_WARNING,
                            constants.OOB_STATUS_CRITICAL]:
                self.LogWarning("On node '%s' item '%s' has status '%s'",
                                node.name, item, status)

          if self.op.command == constants.OOB_POWER_ON:
            node.powered = True
          elif self.op.command == constants.OOB_POWER_OFF:
            node.powered = False
          elif self.op.command == constants.OOB_POWER_STATUS:
            powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
            if powered != node.powered:
              logging.warning(("Recorded power state (%s) of node '%s' does not"
                               " match actual power state (%s)"), node.powered,
                              node.name, powered)

          # For configuration changing commands we should update the node
          if self.op.command in (constants.OOB_POWER_ON,
                                 constants.OOB_POWER_OFF):
            self.cfg.Update(node, feedback_fn)

          node_entry.append((constants.RS_NORMAL, result.payload))

    return ret

  def _CheckPayload(self, result):
    """Checks if the payload is valid.

    @param result: RPC result
    @raises errors.OpExecError: If payload is not valid

    """
    errs = []
    if self.op.command == constants.OOB_HEALTH:
      if not isinstance(result.payload, list):
        errs.append("command 'health' is expected to return a list but got %s" %
                    type(result.payload))
      else:
        for item, status in result.payload:
          if status not in constants.OOB_STATUSES:
            errs.append("health item '%s' has invalid status '%s'" %
                        (item, status))

    if self.op.command == constants.OOB_POWER_STATUS:
      if not isinstance(result.payload, dict):
        errs.append("power-status is expected to return a dict but got %s" %
                    type(result.payload))

    if self.op.command in [
        constants.OOB_POWER_ON,
        constants.OOB_POWER_OFF,
        constants.OOB_POWER_CYCLE,
        ]:
      if result.payload is not None:
        errs.append("%s is expected to not return payload but got '%s'" %
                    (self.op.command, result.payload))

    if errs:
      raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
                               utils.CommaJoin(errs))


class LUOsDiagnose(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  REQ_BGL = False
  _HID = "hidden"
  _BLK = "blacklisted"
  _VLD = "valid"
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", _VLD, "node_status", "variants",
                                   "parameters", "api_versions", _HID, _BLK)

  def CheckArguments(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported",
                                 errors.ECODE_INVAL)

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def ExpandNames(self):
    # Lock all nodes, in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    self.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  @staticmethod
  def _DiagnoseByOS(rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another
        map, with nodes as keys and tuples of (path, status, diagnose,
        variants, parameters, api_versions) as values, eg::

          {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
                                     (/srv/..., False, "invalid api")],
                           "node2": [(/srv/..., True, "", [], [])]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].fail_msg]
    for node_name, nr in rlist.items():
      if nr.fail_msg or not nr.payload:
        continue
      for (name, path, status, diagnose, variants,
           params, api_versions) in nr.payload:
        if name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[name] = {}
          for nname in good_nodes:
            all_os[name][nname] = []
        # convert params from [name, help] to (name, help)
        params = [tuple(v) for v in params]
        all_os[name][node_name].append((path, status, diagnose,
                                        variants, params, api_versions))
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    valid_nodes = [node.name
                   for node in self.cfg.GetAllNodesInfo().values()
                   if not node.offline and node.vm_capable]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    pol = self._DiagnoseByOS(node_data)
    output = []
    cluster = self.cfg.GetClusterInfo()

    for os_name in utils.NiceSort(pol.keys()):
      os_data = pol[os_name]
      row = []
      valid = True
      (variants, params, api_versions) = null_state = (set(), set(), set())
      for idx, osl in enumerate(os_data.values()):
        valid = bool(valid and osl and osl[0][1])
        if not valid:
          (variants, params, api_versions) = null_state
          break
        node_variants, node_params, node_api = osl[0][3:6]
        if idx == 0: # first entry
          variants = set(node_variants)
          params = set(node_params)
          api_versions = set(node_api)
        else: # keep consistency
          variants.intersection_update(node_variants)
          params.intersection_update(node_params)
          api_versions.intersection_update(node_api)

      is_hid = os_name in cluster.hidden_os
      is_blk = os_name in cluster.blacklisted_os
      if ((self._HID not in self.op.output_fields and is_hid) or
          (self._BLK not in self.op.output_fields and is_blk) or
          (self._VLD not in self.op.output_fields and not valid)):
        continue

      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == self._VLD:
          val = valid
        elif field == "node_status":
          # this is just a copy of the dict
          val = {}
          for node_name, nos_list in os_data.items():
            val[node_name] = nos_list
        elif field == "variants":
          val = utils.NiceSort(list(variants))
        elif field == "parameters":
          val = list(params)
        elif field == "api_versions":
          val = list(api_versions)
        elif field == self._HID:
          val = is_hid
        elif field == self._BLK:
          val = is_blk
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output


class LUNodeRemove(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    try:
      all_nodes.remove(self.op.node_name)
    except ValueError:
      logging.warning("Node %s which is about to be removed not found"
                      " in the all nodes list", self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    node = self.cfg.GetNodeInfo(self.op.node_name)
    assert node is not None

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.",
                                 errors.ECODE_INVAL)

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name,
                                   errors.ECODE_INVAL)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self, exceptions=[node.name])
    self.context.RemoveNode(node.name)

    # Run post hooks on the node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
    except:
      # pylint: disable-msg=W0702
      self.LogWarning("Errors occurred running hooks on %s" % node.name)

    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
    msg = result.fail_msg
    if msg:
      self.LogWarning("Errors encountered on the remote node while leaving"
                      " the cluster: %s", msg)

    # Remove node from our /etc/hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      master_node = self.cfg.GetMasterNode()
      result = self.rpc.call_etc_hosts_modify(master_node,
                                              constants.ETC_HOSTS_REMOVE,
                                              node.name, None)
      result.Raise("Can't update hosts file with new host data")
      _RedistributeAncillaryFiles(self)


class _NodeQuery(_QueryBase):
  FIELDS = query.NODE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks[locking.LEVEL_NODE] = 1

    if self.names:
      self.wanted = _GetWantedNodes(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.NQ_LIVE in self.requested_data)

    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      lu.needed_locks[locking.LEVEL_NODE] = self.wanted

  def DeclareLocks(self, lu, level):
    pass

  def _GetQueryData(self, lu):
    """Computes the list of nodes and their attributes.

    """
    all_info = lu.cfg.GetAllNodesInfo()

    nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)

    # Gather data as requested
    if query.NQ_LIVE in self.requested_data:
      # filter out non-vm_capable nodes
      toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]

      node_data = lu.rpc.call_node_info(toquery_nodes, lu.cfg.GetVGName(),
                                        lu.cfg.GetHypervisorType())
      live_data = dict((name, nresult.payload)
                       for (name, nresult) in node_data.items()
                       if not nresult.fail_msg and nresult.payload)
    else:
      live_data = None

    if query.NQ_INST in self.requested_data:
      node_to_primary = dict([(name, set()) for name in nodenames])
      node_to_secondary = dict([(name, set()) for name in nodenames])

      inst_data = lu.cfg.GetAllInstancesInfo()

      for inst in inst_data.values():
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)
    else:
      node_to_primary = None
      node_to_secondary = None

    if query.NQ_OOB in self.requested_data:
      oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
                         for name, node in all_info.iteritems())
    else:
      oob_support = None

    if query.NQ_GROUP in self.requested_data:
      groups = lu.cfg.GetAllNodeGroupsInfo()
    else:
      groups = {}

    return query.NodeQueryData([all_info[name] for name in nodenames],
                               live_data, lu.cfg.GetMasterNode(),
                               node_to_primary, node_to_secondary, groups,
                               oob_support, lu.cfg.GetClusterInfo())


class LUNodeQuery(NoHooksLU):
  """Logical unit for querying nodes.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.nq = _NodeQuery(self.op.names, self.op.output_fields,
                         self.op.use_locking)

  def ExpandNames(self):
    self.nq.ExpandNames(self)

  def Exec(self, feedback_fn):
    return self.nq.OldStyleQuery(self)


class LUNodeQueryvols(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.acquired_locks[locking.LEVEL_NODE]
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      nresult = volumes[node]
      if nresult.offline:
        continue
      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
        continue

      node_vols = nresult.payload[:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output


class LUNodeQueryStorage(NoHooksLU):
  """Logical unit for getting information on storage units on node(s).

  """
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
  REQ_BGL = False

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

    # Always get name to sort by
    if constants.SF_NAME in self.op.output_fields:
      fields = self.op.output_fields[:]
    else:
      fields = [constants.SF_NAME] + self.op.output_fields

    # Never ask for node or type as it's only known to the LU
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
      while extra in fields:
        fields.remove(extra)

    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
    name_idx = field_idx[constants.SF_NAME]

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    data = self.rpc.call_storage_list(self.nodes,
                                      self.op.storage_type, st_args,
                                      self.op.name, fields)

    result = []

    for node in utils.NiceSort(self.nodes):
      nresult = data[node]
      if nresult.offline:
        continue

      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
        continue

      rows = dict([(row[name_idx], row) for row in nresult.payload])

      for name in utils.NiceSort(rows.keys()):
        row = rows[name]

        out = []

        for field in self.op.output_fields:
          if field == constants.SF_NODE:
            val = node
          elif field == constants.SF_TYPE:
            val = self.op.storage_type
          elif field in field_idx:
            val = row[field_idx[field]]
          else:
            raise errors.ParameterError(field)

          out.append(val)

        result.append(out)

    return result


class _InstanceQuery(_QueryBase):
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks[locking.LEVEL_INSTANCE] = 1
    lu.share_locks[locking.LEVEL_NODE] = 1

    if self.names:
      self.wanted = _GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, lu, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      lu._LockInstancesNodes() # pylint: disable-msg=W0212

  def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
    cluster = lu.cfg.GetClusterInfo()
    all_info = lu.cfg.GetAllInstancesInfo()

    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)

    instance_list = [all_info[name] for name in instance_names]
    nodes = frozenset(itertools.chain(*(inst.all_nodes
                                        for inst in instance_list)))
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
    bad_nodes = []
    offline_nodes = []
    wrongnode_inst = set()

    # Gather data as requested
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_nodes.append(name)
        if result.fail_msg:
          bad_nodes.append(name)
        elif result.payload:
          for inst in result.payload:
            if inst in all_info:
              if all_info[inst].primary_node == name:
                live_data.update(result.payload)
              else:
                wrongnode_inst.add(inst)
            else:
              # orphan instance; we don't list it here as we don't
              # handle this case yet in the output of instance listing
              logging.warning("Orphan instance '%s' found on node %s",
                              inst, name)
        # else no instance is alive
    else:
      live_data = {}

    if query.IQ_DISKUSAGE in self.requested_data:
      disk_usage = dict((inst.name,
                         _ComputeDiskSize(inst.disk_template,
                                          [{"size": disk.size}
                                           for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.name in live_data:
          # Instance is running
          consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
        else:
          consinfo[inst.name] = None
      assert set(consinfo.keys()) == set(instance_names)
    else:
      consinfo = None

    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_nodes, bad_nodes,
                                   live_data, wrongnode_inst, consinfo)


class LUQuery(NoHooksLU):
  """Query for resources/items of a certain kind.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    qcls = _GetQueryImplementation(self.op.what)
    names = qlang.ReadSimpleFilter("name", self.op.filter)

    self.impl = qcls(names, self.op.fields, False)

  def ExpandNames(self):
    self.impl.ExpandNames(self)

  def DeclareLocks(self, level):
    self.impl.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.impl.NewStyleQuery(self)


class LUQueryFields(NoHooksLU):
  """Query for resources/items of a certain kind.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.qcls = _GetQueryImplementation(self.op.what)

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    return self.qcls.FieldsQuery(self.op.fields)


class LUNodeModifyStorage(NoHooksLU):
  """Logical unit for modifying a storage volume on a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type

    try:
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
    except KeyError:
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " modified" % storage_type,
                                 errors.ECODE_INVAL)

    diff = set(self.op.changes.keys()) - modifiable
    if diff:
      raise errors.OpPrereqError("The following fields can not be modified for"
                                 " storage units of type '%s': %r" %
                                 (storage_type, list(diff)),
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_name,
      }

  def Exec(self, feedback_fn):
    """Modifies a storage volume on a node.

    """
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_modify(self.op.node_name,
                                          self.op.storage_type, st_args,
                                          self.op.name, self.op.changes)
    result.Raise("Failed to modify storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))


class LUNodeAdd(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _NFLAGS = ["master_capable", "vm_capable"]

  def CheckArguments(self):
    self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
    # validate/normalize the node name
    self.hostname = netutils.GetHostname(name=self.op.node_name,
                                         family=self.primary_ip_family)
    self.op.node_name = self.hostname.name

    if self.op.readd and self.op.node_name == self.cfg.GetMasterNode():
      raise errors.OpPrereqError("Cannot readd the master node",
                                 errors.ECODE_STATE)

    if self.op.readd and self.op.group:
      raise errors.OpPrereqError("Cannot pass a node group when a node is"
                                 " being readded", errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      "MASTER_CAPABLE": str(self.op.master_capable),
      "VM_CAPABLE": str(self.op.vm_capable),
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signaled by raising errors.OpPrereqError.

    """
    cfg = self.cfg
    hostname = self.hostname
    node = hostname.name
    primary_ip = self.op.primary_ip = hostname.ip
    if self.op.secondary_ip is None:
      if self.primary_ip_family == netutils.IP6Address.family:
        raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
                                   " IPv4 address must be given as secondary",
                                   errors.ECODE_INVAL)
      self.op.secondary_ip = primary_ip

    secondary_ip = self.op.secondary_ip
    if not netutils.IP4Address.IsValid(secondary_ip):
      raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
                                 " address" % secondary_ip, errors.ECODE_INVAL)

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node, errors.ECODE_EXISTS)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
                                 errors.ECODE_NOENT)

    self.changed_primary_ip = False

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if existing_node.secondary_ip != secondary_ip:
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before",
                                     errors.ECODE_INVAL)
        if existing_node.primary_ip != primary_ip:
          self.changed_primary_ip = True

        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name,
                                   errors.ECODE_NOTUNIQUE)

    # After this 'if' block, None is no longer a valid value for the
    # _capable op attributes
    if self.op.readd:
      old_node = self.cfg.GetNodeInfo(node)
      assert old_node is not None, "Can't retrieve locked node %s" % node
      for attr in self._NFLAGS:
        if getattr(self.op, attr) is None:
          setattr(self.op, attr, getattr(old_node, attr))
    else:
      for attr in self._NFLAGS:
        if getattr(self.op, attr) is None:
          setattr(self.op, attr, True)

    if self.op.readd and not self.op.vm_capable:
      pri, sec = cfg.GetNodeInstances(node)
      if pri or sec:
        raise errors.OpPrereqError("Node %s being re-added with vm_capable"
                                   " flag set to false, but it already holds"
                                   " instances" % node,
                                   errors.ECODE_STATE)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no secondary ip but the"
                                   " new node has one",
                                   errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("The master has a secondary ip but the"
                                   " new node doesn't have one",
                                   errors.ECODE_INVAL)

    # checks reachability
    if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping",
                                 errors.ECODE_ENVIRON)

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to node daemon port",
                                   errors.ECODE_ENVIRON)

    if self.op.readd:
      exceptions = [node]
    else:
      exceptions = []

    if self.op.master_capable:
      self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
    else:
      self.master_candidate = False

    if self.op.readd:
      self.new_node = old_node
    else:
      node_group = cfg.LookupNodeGroup(self.op.group)
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False,
                                   group=node_group)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
4258
    new_node = self.new_node
4259
    node = new_node.name
4260

    
4261
    # We are adding a new node, so we assume it's powered
    new_node.powered = True

    # for re-adds, reset the offline/drained/master-candidate flags;
    # we need to reset here, otherwise offline would prevent RPC calls
    # later in the procedure; this also means that if the re-add
    # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
      self.LogInfo("Readding a node, the offline/drained flags were reset")
      # if we demote the node, we do cleanup later in the procedure
      new_node.master_candidate = self.master_candidate
      if self.changed_primary_ip:
        new_node.primary_ip = self.op.primary_ip

    # copy the master/vm_capable flags
    for attr in self._NFLAGS:
      setattr(new_node, attr, getattr(self.op, attr))

    # notify the user about any possible mc promotion
    if new_node.master_candidate:
      self.LogInfo("Node will be a master candidate")

    if self.op.ndparams:
      new_node.ndparams = self.op.ndparams
    else:
      new_node.ndparams = {}

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise("Can't get version information from node %s" % node)
    if constants.PROTOCOL_VERSION == result.payload:
      logging.info("Communication to node %s fine, sw version %s match",
                   node, result.payload)
    else:
      raise errors.OpExecError("Version mismatch master version %s,"
                               " node version %s" %
                               (constants.PROTOCOL_VERSION, result.payload))

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      master_node = self.cfg.GetMasterNode()
      result = self.rpc.call_etc_hosts_modify(master_node,
                                              constants.ETC_HOSTS_ADD,
                                              self.hostname.name,
                                              self.hostname.ip)
      result.Raise("Can't update hosts file with new host data")

    if new_node.secondary_ip != new_node.primary_ip:
      _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
                               False)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      constants.NV_NODELIST: [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
      if nl_payload:
        for failed in nl_payload:
          feedback_fn("ssh/hostname verification failed"
                      " (checking from %s): %s" %
                      (verifier, nl_payload[failed]))
        raise errors.OpExecError("ssh/hostname verification failed")

    if self.op.readd:
      _RedistributeAncillaryFiles(self)
      self.context.ReaddNode(new_node)
      # make sure we redistribute the config
      self.cfg.Update(new_node, feedback_fn)
      # and make sure the new node will not have old files around
      if not new_node.master_candidate:
        result = self.rpc.call_node_demote_from_mc(new_node.name)
        msg = result.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself from master"
                          " candidate status: %s" % msg)
    else:
      _RedistributeAncillaryFiles(self, additional_nodes=[node],
                                  additional_vm=self.op.vm_capable)
      self.context.AddNode(new_node, self.proc.GetECId())


class LUNodeSetParams(LogicalUnit):
  """Modifies the parameters of a node.

  @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
      to the node role (as _ROLE_*)
  @cvar _R2F: a dictionary from node role to tuples of flags
  @cvar _FLAGS: a list of attribute names corresponding to the flags

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  REQ_BGL = False
  (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
  _F2R = {
    (True, False, False): _ROLE_CANDIDATE,
    (False, True, False): _ROLE_DRAINED,
    (False, False, True): _ROLE_OFFLINE,
    (False, False, False): _ROLE_REGULAR,
    }
  _R2F = dict((v, k) for k, v in _F2R.items())
  _FLAGS = ["master_candidate", "drained", "offline"]

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
                self.op.master_capable, self.op.vm_capable,
                self.op.secondary_ip, self.op.ndparams]
    if all_mods.count(None) == len(all_mods):
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)
    if all_mods.count(True) > 1:
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time",
                                 errors.ECODE_INVAL)

    # Boolean value that tells us whether we might be demoting from MC
    self.might_demote = (self.op.master_candidate == False or
                         self.op.offline == True or
                         self.op.drained == True or
                         self.op.master_capable == False)

    if self.op.secondary_ip:
      if not netutils.IP4Address.IsValid(self.op.secondary_ip):
        raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
                                   " address" % self.op.secondary_ip,
                                   errors.ECODE_INVAL)

    self.lock_all = self.op.auto_promote and self.might_demote
    self.lock_instances = self.op.secondary_ip is not None

  def ExpandNames(self):
    if self.lock_all:
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    else:
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

    if self.lock_instances:
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

  def DeclareLocks(self, level):
    # If we have locked all instances, before waiting to lock nodes, release
    # all the ones living on nodes unrelated to the current operation.
    if level == locking.LEVEL_NODE and self.lock_instances:
      instances_release = []
      instances_keep = []
      self.affected_instances = []
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
        for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
          instance = self.context.cfg.GetInstanceInfo(instance_name)
          i_mirrored = instance.disk_template in constants.DTS_NET_MIRROR
          if i_mirrored and self.op.node_name in instance.all_nodes:
            instances_keep.append(instance_name)
            self.affected_instances.append(instance)
          else:
            instances_release.append(instance_name)
        if instances_release:
          self.context.glm.release(locking.LEVEL_INSTANCE, instances_release)
          self.acquired_locks[locking.LEVEL_INSTANCE] = instances_keep

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      "MASTER_CAPABLE": str(self.op.master_capable),
      "VM_CAPABLE": str(self.op.vm_capable),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if (self.op.master_candidate is not None or
        self.op.drained is not None or
        self.op.offline is not None):
      # we can't change the master's node flags
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master role can be changed"
                                   " only via master-failover",
                                   errors.ECODE_INVAL)

    if self.op.master_candidate and not node.master_capable:
      raise errors.OpPrereqError("Node %s is not master capable, cannot make"
                                 " it a master candidate" % node.name,
                                 errors.ECODE_STATE)

    if self.op.vm_capable == False:
      (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
      if ipri or isec:
        raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
                                   " the vm_capable flag" % node.name,
                                   errors.ECODE_STATE)

    if node.master_candidate and self.might_demote and not self.lock_all:
      assert not self.op.auto_promote, "auto_promote set but lock_all not"
      # check if after removing the current node, we're missing master
      # candidates
      (mc_remaining, mc_should, _) = \
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
      if mc_remaining < mc_should:
        raise errors.OpPrereqError("Not enough master candidates, please"
                                   " pass auto promote option to allow"
                                   " promotion", errors.ECODE_STATE)

    self.old_flags = old_flags = (node.master_candidate,
                                  node.drained, node.offline)
    assert old_flags in self._F2R, "Un-handled old flags  %s" % str(old_flags)
    self.old_role = old_role = self._F2R[old_flags]

    # Check for ineffective changes
    for attr in self._FLAGS:
      if (getattr(self.op, attr) == False and getattr(node, attr) == False):
        self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
        setattr(self.op, attr, None)

    # Past this point, any flag change to False means a transition
    # away from the respective state, as only real changes are kept

    # TODO: We might query the real power state if it supports OOB
    if _SupportsOob(self.cfg, node):
      if self.op.offline is False and not (node.powered or
                                           self.op.powered == True):
        raise errors.OpPrereqError(("Please power on node %s first before you"
                                    " can reset offline state") %
                                   self.op.node_name)
    elif self.op.powered is not None:
      raise errors.OpPrereqError(("Unable to change powered state for node %s"
                                  " which does not support out-of-band"
                                  " handling") % self.op.node_name)

    # If we're being deofflined/drained, we'll MC ourself if needed
    if (self.op.drained == False or self.op.offline == False or
        (self.op.master_capable and not node.master_capable)):
      if _DecideSelfPromotion(self):
        self.op.master_candidate = True
        self.LogInfo("Auto-promoting node to master candidate")

    # If we're no longer master capable, we'll demote ourselves from MC
    if self.op.master_capable == False and node.master_candidate:
      self.LogInfo("Demoting from master candidate")
      self.op.master_candidate = False

    # Compute new role
    assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
    if self.op.master_candidate:
      new_role = self._ROLE_CANDIDATE
    elif self.op.drained:
      new_role = self._ROLE_DRAINED
    elif self.op.offline:
      new_role = self._ROLE_OFFLINE
    elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
      # False is still in new flags, which means we're un-setting (the
      # only) True flag
      new_role = self._ROLE_REGULAR
    else: # no new flags, nothing, keep old role
      new_role = old_role

    self.new_role = new_role

    if old_role == self._ROLE_OFFLINE and new_role != old_role:
      # Trying to transition out of offline status
      result = self.rpc.call_version([node.name])[node.name]
      if result.fail_msg:
        raise errors.OpPrereqError("Node %s is being de-offlined but fails"
                                   " to report its version: %s" %
                                   (node.name, result.fail_msg),
                                   errors.ECODE_STATE)
      else:
        self.LogWarning("Transitioning node from offline to online state"
                        " without using re-add. Please make sure the node"
                        " is healthy!")

    if self.op.secondary_ip:
      # Ok even without locking, because this can't be changed by any LU
      master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
      master_singlehomed = master.secondary_ip == master.primary_ip
      if master_singlehomed and self.op.secondary_ip:
        raise errors.OpPrereqError("Cannot change the secondary ip on a single"
                                   " homed cluster", errors.ECODE_INVAL)

      if node.offline:
        if self.affected_instances:
          raise errors.OpPrereqError("Cannot change secondary ip: offline"
                                     " node has instances (%s) configured"
                                     " to use it" % self.affected_instances)
      else:
        # On online nodes, check that no instances are running, and that
        # the node has the new ip and we can reach it.
        for instance in self.affected_instances:
          _CheckInstanceDown(self, instance, "cannot change secondary ip")

        _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
        if master.name != node.name:
          # check reachability from master secondary ip to new secondary ip
          if not netutils.TcpPing(self.op.secondary_ip,
                                  constants.DEFAULT_NODED_PORT,
                                  source=master.secondary_ip):
            raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                       " based ping to node daemon port",
                                       errors.ECODE_ENVIRON)

    if self.op.ndparams:
      new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = new_ndparams

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node
    old_role = self.old_role
    new_role = self.new_role

    result = []

    if self.op.ndparams:
      node.ndparams = self.new_ndparams

    if self.op.powered is not None:
      node.powered = self.op.powered

    for attr in ["master_capable", "vm_capable"]:
      val = getattr(self.op, attr)
      if val is not None:
        setattr(node, attr, val)
        result.append((attr, str(val)))

    if new_role != old_role:
      # Tell the node to demote itself, if no longer MC and not offline
      if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
        msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself: %s", msg)

      new_flags = self._R2F[new_role]
      for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
        if of != nf:
          result.append((desc, str(nf)))
      (node.master_candidate, node.drained, node.offline) = new_flags

      # we locked all nodes, we adjust the CP before updating this node
      if self.lock_all:
        _AdjustCandidatePool(self, [node.name])

    if self.op.secondary_ip:
      node.secondary_ip = self.op.secondary_ip
      result.append(("secondary_ip", self.op.secondary_ip))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node, feedback_fn)

    # this will trigger job queue propagation or cleanup if the mc
    # flag changed
    if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
      self.context.ReaddNode(node)

    return result


class LUNodePowercycle(NoHooksLU):
  """Powercycles a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
      raise errors.OpPrereqError("The node is the master and the force"
                                 " parameter was not set",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    """Locking for PowercycleNode.

    This is a last-resort option and shouldn't block on other
    jobs. Therefore, we grab no locks.

    """
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Reboots a node.

    """
    result = self.rpc.call_node_powercycle(self.op.node_name,
                                           self.cfg.GetHypervisorType())
    result.Raise("Failed to schedule the reboot")
    return result.payload


class LUClusterQuery(NoHooksLU):
  """Query cluster configuration.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    os_hvp = {}

    # Filter just for enabled hypervisors
    for os_name, hv_dict in cluster.os_hvp.items():
      os_hvp[os_name] = {}
      for hv_name, hv_params in hv_dict.items():
        if hv_name in cluster.enabled_hypervisors:
          os_hvp[os_name][hv_name] = hv_params

    # Convert ip_family to ip_version
    primary_ip_version = constants.IP4_VERSION
    if cluster.primary_ip_family == netutils.IP6Address.family:
      primary_ip_version = constants.IP6_VERSION

    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": max(constants.OS_API_VERSIONS),
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.enabled_hypervisors[0],
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
                        for hypervisor_name in cluster.enabled_hypervisors]),
      "os_hvp": os_hvp,
      "beparams": cluster.beparams,
      "osparams": cluster.osparams,
      "nicparams": cluster.nicparams,
      "ndparams": cluster.ndparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "master_netdev": cluster.master_netdev,
      "volume_group_name": cluster.volume_group_name,
      "drbd_usermode_helper": cluster.drbd_usermode_helper,
      "file_storage_dir": cluster.file_storage_dir,
      "maintain_node_health": cluster.maintain_node_health,
      "ctime": cluster.ctime,
      "mtime": cluster.mtime,
      "uuid": cluster.uuid,
      "tags": list(cluster.GetTags()),
      "uid_pool": cluster.uid_pool,
      "default_iallocator": cluster.default_iallocator,
      "reserved_lvs": cluster.reserved_lvs,
      "primary_ip_version": primary_ip_version,
      "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
      "hidden_os": cluster.hidden_os,
      "blacklisted_os": cluster.blacklisted_os,
      }

    return result


class LUClusterConfigQuery(NoHooksLU):
  """Return configuration values.

  """
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
                                  "watcher_pause", "volume_group_name")

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      elif field == "watcher_pause":
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
      elif field == "volume_group_name":
        entry = self.cfg.GetVGName()
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values


class LUInstanceActivateDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = \
              _AssembleInstanceDisks(self, self.instance,
                                     ignore_size=self.op.ignore_size)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
                           ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type disks: list of L{objects.Disk} or None
  @param disks: which disks to assemble (or all, if None)
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  disks = _ExpandCheckDisks(instance, disks)

  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for idx, inst_disk in enumerate(disks):
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False, idx)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for idx, inst_disk in enumerate(disks):
    dev_path = None

    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True, idx)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
      else:
        dev_path = result.payload

    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info


def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                           ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUInstanceDeactivateDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    instance = self.instance
    if self.op.force:
      _ShutdownInstanceDisks(self, instance)
    else:
      _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance, disks=None):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
  _ShutdownInstanceDisks(lu, instance, disks=disks)


def _ExpandCheckDisks(instance, disks):
  """Return the instance disks selected by the disks list

  @type disks: list of L{objects.Disk} or None
  @param disks: selected disks
  @rtype: list of L{objects.Disk}
  @return: selected instance disks to act on

  """
  if disks is None:
    return instance.disks
  else:
    if not set(disks).issubset(instance.disks):
      raise errors.ProgrammerError("Can only act on disks belonging to the"
                                   " target instance")
    return disks


def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If the ignore_primary is false, errors on the primary node are
  not ignored.

  """
  all_result = True
  disks = _ExpandCheckDisks(instance, disks)

  for disk in disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if ((node == instance.primary_node and not ignore_primary) or
            (node != instance.primary_node and not result.offline)):
          all_result = False
  return all_result


def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], None, hypervisor_name)
  nodeinfo[node].Raise("Can't get data from node %s" % node,
                       prereq=True, ecode=errors.ECODE_ENVIRON)
  free_mem = nodeinfo[node].payload.get('memory_free', None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem),
                               errors.ECODE_ENVIRON)
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem),
                               errors.ECODE_NORES)


def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
  """Checks if nodes have enough free disk space in all the VGs.

  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type req_sizes: C{dict}
  @param req_sizes: the hash of vg and corresponding amount of disk in
      MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
      or we cannot check the node

  """
  for vg, req_size in req_sizes.items():
    _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)


def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
  """Checks if nodes have enough free disk space in the specified VG.

  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type vg: C{str}
  @param vg: the volume group to check
  @type requested: C{int}
  @param requested: the amount of disk in MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(nodenames, vg, None)
  for node in nodenames:
    info = nodeinfo[node]
    info.Raise("Cannot get current information from node %s" % node,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    vg_free = info.payload.get("vg_free", None)
    if not isinstance(vg_free, int):
      raise errors.OpPrereqError("Can't compute free disk space on node"
                                 " %s for vg %s, result was '%s'" %
                                 (node, vg, vg_free), errors.ECODE_ENVIRON)
    if requested > vg_free:
      raise errors.OpPrereqError("Not enough disk space on target node %s"
                                 " vg %s: required %d MiB, available %d MiB" %
                                 (node, vg, requested, vg_free),
                                 errors.ECODE_NORES)


class LUInstanceStartup(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    # extra beparams
    if self.op.beparams:
      # fill the beparams dict
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # extra hvparams
    if self.op.hvparams:
      # check hypervisor parameter syntax (locally)
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
      filled_hvp = cluster.FillHV(instance)
      filled_hvp.update(self.op.hvparams)
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)

    self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline

    if self.primary_offline and self.op.ignore_offline_nodes:
      self.proc.LogWarning("Ignoring offline primary node")

      if self.op.hvparams or self.op.beparams:
        self.proc.LogWarning("Overridden parameters are ignored")
    else:
      _CheckNodeOnline(self, instance.primary_node)

      bep = self.cfg.GetClusterInfo().FillBE(instance)

      # check bridges existence
      _CheckInstanceBridgesExist(self, instance)

      remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                instance.name,
                                                instance.hypervisor)
      remote_info.Raise("Error checking node %s" % instance.primary_node,
                        prereq=True, ecode=errors.ECODE_ENVIRON)
      if not remote_info.payload: # not running already
        _CheckNodeFreeMemory(self, instance.primary_node,
                             "starting instance %s" % instance.name,
                             bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force

    if not self.op.no_remember:
      self.cfg.MarkInstanceUp(instance.name)

    if self.primary_offline:
      assert self.op.ignore_offline_nodes
      self.proc.LogInfo("Primary node offline, marked instance as started")
    else:
      node_current = instance.primary_node

      _StartInstanceDisks(self, instance, force)

      result = self.rpc.call_instance_start(node_current, instance,
                                            self.op.hvparams, self.op.beparams)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance: %s" % msg)


class LUInstanceReboot(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type,
                                             self.op.shutdown_timeout)
      result.Raise("Could not reboot instance")
    else:
      result = self.rpc.call_instance_shutdown(node_current, instance,
                                               self.op.shutdown_timeout)
      result.Raise("Could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)


class LUInstanceShutdown(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["TIMEOUT"] = self.op.timeout
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    self.primary_offline = \
      self.cfg.GetNodeInfo(self.instance.primary_node).offline

    if self.primary_offline and self.op.ignore_offline_nodes:
      self.proc.LogWarning("Ignoring offline primary node")
    else:
      _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    timeout = self.op.timeout

    if not self.op.no_remember:
      self.cfg.MarkInstanceDown(instance.name)

    if self.primary_offline:
      assert self.op.ignore_offline_nodes
      self.proc.LogInfo("Primary node offline, marked instance as stopped")
    else:
      result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
      msg = result.fail_msg
      if msg:
        self.proc.LogWarning("Could not shutdown instance: %s" % msg)

      _ShutdownInstanceDisks(self, instance)


class LUInstanceReinstall(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
                     " offline, cannot reinstall")
    for node in instance.secondary_nodes:
      _CheckNodeOnline(self, node, "Instance secondary node offline,"
                       " cannot reinstall")

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name,
                                 errors.ECODE_INVAL)
    _CheckInstanceDown(self, instance, "cannot reinstall")

    if self.op.os_type is not None:
      # OS verification
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
      instance_os = self.op.os_type
    else:
      instance_os = instance.os

    nodelist = list(instance.all_nodes)

    if self.op.osparams:
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = None

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      # Write to configuration
      self.cfg.Update(inst, feedback_fn)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      # FIXME: pass debug option from opcode to backend
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
                                             self.op.debug_level,
                                             osparams=self.os_inst)
      result.Raise("Could not install OS for instance %s on node %s" %
                   (inst.name, inst.primary_node))
    finally:
      _ShutdownInstanceDisks(self, inst)


class LUInstanceRecreateDisks(LogicalUnit):
  """Recreate an instance's missing disks.

  """
  HPATH = "instance-recreate-disks"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    # normalise the disk list
    self.op.disks = sorted(frozenset(self.op.disks))

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    if self.op.nodes:
      self.op.nodes = [_ExpandNodeName(self.cfg, n) for n in self.op.nodes]
      self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = []

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      # if we replace the nodes, we only need to lock the old primary,
      # otherwise we need to lock all nodes for disk re-creation
      primary_only = bool(self.op.nodes)
      self._LockInstancesNodes(primary_only=primary_only)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    if self.op.nodes:
      if len(self.op.nodes) != len(instance.all_nodes):
        raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
                                   " %d replacement nodes were specified" %
                                   (instance.name, len(instance.all_nodes),
                                    len(self.op.nodes)),
                                   errors.ECODE_INVAL)
      assert instance.disk_template != constants.DT_DRBD8 or \
          len(self.op.nodes) == 2
      assert instance.disk_template != constants.DT_PLAIN or \
          len(self.op.nodes) == 1
      primary_node = self.op.nodes[0]
    else:
      primary_node = instance.primary_node
    _CheckNodeOnline(self, primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name, errors.ECODE_INVAL)
    # if we replace nodes *and* the old primary is offline, we don't
    # check
    assert instance.primary_node in self.needed_locks[locking.LEVEL_NODE]
    old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
    if not (self.op.nodes and old_pnode.offline):
      _CheckInstanceDown(self, instance, "cannot recreate disks")

    if not self.op.disks:
      self.op.disks = range(len(instance.disks))
    else:
      for idx in self.op.disks:
        if idx >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
                                     errors.ECODE_INVAL)
    if self.op.disks != range(len(instance.disks)) and self.op.nodes:
      raise errors.OpPrereqError("Can't recreate disks partially and"
                                 " change the nodes at the same time",
                                 errors.ECODE_INVAL)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Recreate the disks.

    """
    # change primary node, if needed
    if self.op.nodes:
      self.instance.primary_node = self.op.nodes[0]
      self.LogWarning("Changing the instance's nodes, you will have to"
                      " remove any disks left on the older nodes manually")

    to_skip = []
    for idx, disk in enumerate(self.instance.disks):
      if idx not in self.op.disks: # disk idx has not been passed in
        to_skip.append(idx)
        continue
      # update secondaries for disks, if needed
      if self.op.nodes:
        if disk.dev_type == constants.LD_DRBD8:
          # need to update the nodes
          assert len(self.op.nodes) == 2
          logical_id = list(disk.logical_id)
          logical_id[0] = self.op.nodes[0]
          logical_id[1] = self.op.nodes[1]
          disk.logical_id = tuple(logical_id)

    if self.op.nodes:
      self.cfg.Update(self.instance, feedback_fn)

    _CreateDisks(self, self.instance, to_skip=to_skip)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do ip check without a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    _CheckNodeOnline(self, instance.primary_node)
    _CheckInstanceDown(self, instance, "cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = netutils.GetHostname(name=new_name)
      if hostname != new_name:
        self.LogInfo("Resolved given name '%s' to '%s'", new_name,
                     hostname.name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    rename_file_storage = False
    if (inst.disk_template == constants.DT_FILE and
        self.op.new_name != inst.name):
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)

    return inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return env, nl, nl_post

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)


def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
  """Utility function to remove an instance.

  """
  logging.info("Removing block devices for instance %s", instance.name)

  if not _RemoveDisks(lu, instance):
    if not ignore_failures:
      raise errors.OpExecError("Can't remove instance's disks")
    feedback_fn("Warning: can't remove instance's disks")

  logging.info("Removing instance %s out of cluster config", instance.name)

  lu.cfg.RemoveInstance(instance.name)

  assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
    "Instance lock removal conflict"

  # Remove lock for the instance
  lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name


class LUInstanceQuery(NoHooksLU):
  """Logical unit for querying instances.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.iq = _InstanceQuery(self.op.names, self.op.output_fields,
                             self.op.use_locking)

  def ExpandNames(self):
    self.iq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.iq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.iq.OldStyleQuery(self)


class LUInstanceFailover(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self.instance
    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      "OLD_PRIMARY": source_node,
      "OLD_SECONDARY": target_node,
      "NEW_PRIMARY": target_node,
      "NEW_SECONDARY": source_node,
      }
    env.update(_BuildInstanceHookEnvByObject(self, instance))
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    nl_post = list(nl)
    nl_post.append(source_node)
    return env, nl, nl_post

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.",
                                 errors.ECODE_STATE)

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance
    primary_node = self.cfg.GetNodeInfo(instance.primary_node)

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    if instance.admin_up:
      feedback_fn("* checking disk consistency between source and target")
      for dev in instance.disks:
        # for drbd, these are drbd over lvm
        if not _CheckDiskConsistency(self, dev, target_node, False):
          if not self.op.ignore_consistency:
            raise errors.OpExecError("Disk %s is degraded on target node,"
                                     " aborting failover." % dev.iv_name)
    else:
      feedback_fn("* not checking disk consistency as instance is not running")

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency or primary_node.offline:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance, feedback_fn)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUInstanceMigrate(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self._migrater = TLMigrateInstance(self, self.op.instance_name,
                                       self.op.cleanup)
    self.tasklets = [self._migrater]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self._migrater.instance
    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]
    env = _BuildInstanceHookEnvByObject(self, instance)
    env["MIGRATE_LIVE"] = self._migrater.live
    env["MIGRATE_CLEANUP"] = self.op.cleanup
    env.update({
        "OLD_PRIMARY": source_node,
        "OLD_SECONDARY": target_node,
        "NEW_PRIMARY": target_node,
        "NEW_SECONDARY": source_node,
        })
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    nl_post = list(nl)
    nl_post.append(source_node)
    return env, nl, nl_post


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
                                       self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    _CheckNodeVmCapable(self, target_node)

    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      _CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, disk,
                                               instance.name, True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node, disk,
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    instance.primary_node = target_node
    self.cfg.Update(instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    _RemoveDisks(self, instance, target_node=source_node)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUNodeMigrate(LogicalUnit):
  """Migrate all instances from a node.

  """
  HPATH = "node-migrate"
  HTYPE = constants.HTYPE_NODE
  REQ_BGL = False

  def ExpandNames(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

    # Create tasklets for migrating all instances on this node
    names = []
    tasklets = []

    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
      logging.debug("Migrating instance %s", inst.name)
      names.append(inst.name)

      tasklets.append(TLMigrateInstance(self, inst.name, False))

    self.tasklets = tasklets

    # Declare instance locks
    self.needed_locks[locking.LEVEL_INSTANCE] = names

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "NODE_NAME": self.op.node_name,
      }

    nl = [self.cfg.GetMasterNode()]

    return (env, nl, nl)


class TLMigrateInstance(Tasklet):
  """Tasklet class for instance migration.

  @type live: boolean
  @ivar live: whether the migration will be done live or non-live;
      this variable is initialized only after CheckPrereq has run

  """
  def __init__(self, lu, instance_name, cleanup):
    """Initializes this class.

    """
    Tasklet.__init__(self, lu)

    # Parameters
    self.instance_name = instance_name
    self.cleanup = cleanup
    self.live = False # will be overridden later

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
    instance = self.cfg.GetInstanceInfo(instance_name)
    assert instance is not None

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.", errors.ECODE_STATE)

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ConfigurationError("No secondary node but using"
                                      " drbd8 disk template")

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    _CheckInstanceBridgesExist(self.lu, instance, node=target_node)

    if not self.cleanup:
      _CheckNodeNotDrained(self.lu, target_node)
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      result.Raise("Can't migrate, please use failover",
                   prereq=True, ecode=errors.ECODE_STATE)

    self.instance = instance

    if self.lu.op.live is not None and self.lu.op.mode is not None:
      raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
                                 " parameters are accepted",
                                 errors.ECODE_INVAL)
    if self.lu.op.live is not None:
      if self.lu.op.live:
        self.lu.op.mode = constants.HT_MIGRATION_LIVE
      else:
        self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
      # reset the 'live' parameter to None so that repeated
      # invocations of CheckPrereq do not raise an exception
      self.lu.op.live = None
    elif self.lu.op.mode is None:
      # read the default value from the hypervisor
      i_hv = self.cfg.GetClusterInfo().FillHV(instance, skip_globals=False)
      self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]

    self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        nres.Raise("Cannot resync disks on node %s" % node)
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    result.Raise("Cannot change disk to secondary on node %s" % node)

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      nres.Raise("Cannot disconnect disks on node %s" % node)

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      nres.Raise("Cannot change disks config on node %s" % node)

  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise("Can't contact node %s" % node)

    runningon_source = instance.name in ins_l[source_node].payload
    runningon_target = instance.name in ins_l[target_node].payload

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance, self.feedback_fn)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore errors here, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.lu.LogWarning("Migration failed and I can't reconnect the"
                         " drives: error '%s'\n"
                         "Please look and recover the instance status" %
                         str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.fail_msg
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s",
                    target_node, abort_msg)
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self.lu, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.fail_msg
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.fail_msg
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Pre-migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.live)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance, self.feedback_fn)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s", msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def Exec(self, feedback_fn):
    """Perform the migration.

    """
    feedback_fn("Migrating instance %s" % self.instance.name)

    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node
    self.target_node = self.instance.secondary_nodes[0]
    self.all_nodes = [self.source_node, self.target_node]
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }
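    # Note: the secondary IPs are used here, as DRBD replication (and the
    # migration RPCs that receive this mapping) is expected to run over the
    # secondary network.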

    if self.cleanup:
      return self._ExecCleanup()
    else:
      return self._ExecMigration()


def _CreateBlockDev(lu, node, instance, device, force_create,
                    info, force_open):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device which has
      CreateOnSecondary() attribute
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  if device.CreateOnSecondary():
    force_create = True

  if device.children:
    for child in device.children:
      _CreateBlockDev(lu, node, instance, child, force_create,
                      info, force_open)

  if not force_create:
    return

  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)


def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created in advance.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  lu.cfg.SetDiskID(device, node)
  result = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, force_open, info)
  result.Raise("Can't create block device %s on"
               " node %s for instance %s" % (device, node, instance.name))
  if device.physical_id is None:
    device.physical_id = result.payload


def _GenerateUniqueNames(lu, exts):
  """Generate a suitable LV name.

  This will generate a logical volume name for the given instance.

  """
  results = []
  for val in exts:
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
    results.append("%s%s" % (new_id, val))
  return results
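  # For instance, exts == [".disk0", ".disk1"] yields something like
  # ["<unique-id>.disk0", "<unique-id>.disk1"], with a fresh unique id
  # generated for each entry (illustrative example).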


def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
                         iv_name, p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  assert len(vgnames) == len(names) == 2
  port = lu.cfg.AllocatePort()
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgnames[0], names[0]))
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgnames[1], names[1]))
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                          logical_id=(primary, secondary, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name)
  return drbd_dev


def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index, feedback_fn):
  """Generate the entire disk layout for a given template type.

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      vg = disk.get("vg", vgname)
      feedback_fn("* disk %i, vg %s, name %s" % (idx, vg, names[idx]))
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vg, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)
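    # The minors are allocated pairwise, one per requested (node, instance)
    # entry above, so minors[2 * idx] belongs to the primary node and
    # minors[2 * idx + 1] to the secondary node of disk 'idx' below.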

    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      data_vg = disk.get("vg", vgname)
      meta_vg = disk.get("metavg", data_vg)
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], [data_vg, meta_vg],
                                      names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    opcodes.RequireFileStorage()

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         disk_index)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks


def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name


def _CalcEta(time_taken, written, total_size):
  """Calculates the ETA based on size written and total size.

  @param time_taken: The time taken so far
  @param written: amount written so far
  @param total_size: The total size of data to be written
  @return: The remaining time in seconds

  """
  avg_time = time_taken / float(written)
  return (total_size - written) * avg_time
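  # Example: if 500 MiB were written in 100 seconds, avg_time is 0.2 s/MiB,
  # so for a total of 2000 MiB roughly 300 seconds remain.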


def _WipeDisks(lu, instance):
  """Wipes instance disks.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should wipe
  @return: the success of the wipe

  """
  node = instance.primary_node

  for device in instance.disks:
    lu.cfg.SetDiskID(device, node)

  logging.info("Pause sync of instance %s disks", instance.name)
  result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)

  for idx, success in enumerate(result.payload):
    if not success:
      logging.warn("pause-sync of instance %s for disk %d failed",
                   instance.name, idx)

  try:
    for idx, device in enumerate(instance.disks):
      # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
      # MAX_WIPE_CHUNK at max
      wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
                            constants.MIN_WIPE_CHUNK_PERCENT)
      # we _must_ make this an int, otherwise rounding errors will
      # occur
      wipe_chunk_size = int(wipe_chunk_size)
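      # As an illustration, with MIN_WIPE_CHUNK_PERCENT of e.g. 10 and
      # MAX_WIPE_CHUNK of e.g. 2048 MiB, a 10240 MiB disk would be wiped in
      # 1024 MiB chunks, while very large disks would be capped at 2048 MiB
      # per chunk (hypothetical values, for illustration only).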

      lu.LogInfo("* Wiping disk %d", idx)
      logging.info("Wiping disk %d for instance %s, node %s using"
                   " chunk size %s", idx, instance.name, node, wipe_chunk_size)

      offset = 0
      size = device.size
      last_output = 0
      start_time = time.time()

      while offset < size:
        wipe_size = min(wipe_chunk_size, size - offset)
        logging.debug("Wiping disk %d, offset %s, chunk %s",
                      idx, offset, wipe_size)
        result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
        result.Raise("Could not wipe disk %d at offset %d for size %d" %
                     (idx, offset, wipe_size))
        now = time.time()
        offset += wipe_size
        if now - last_output >= 60:
          eta = _CalcEta(now - start_time, offset, size)
          lu.LogInfo(" - done: %.1f%% ETA: %s" %
                     (offset / float(size) * 100, utils.FormatSeconds(eta)))
          last_output = now
  finally:
    logging.info("Resume sync of instance %s disks", instance.name)

    result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, False)

    for idx, success in enumerate(result.payload):
      if not success:
        lu.LogWarning("Warning: Resume sync of disk %d failed. Please have a"
                      " look at the status and troubleshoot the issue.", idx)
        logging.warn("resume-sync of instance %s for disk %d failed",
                     instance.name, idx)


def _CreateDisks(lu, instance, to_skip=None, target_node=None):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @type to_skip: list
  @param to_skip: list of indices to skip
  @type target_node: string
  @param target_node: if passed, overrides the target node for creation
  @rtype: boolean
  @return: the success of the creation

  """
  info = _GetInstanceInfoText(instance)
  if target_node is None:
    pnode = instance.primary_node
    all_nodes = instance.all_nodes
  else:
    pnode = target_node
    all_nodes = [pnode]

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    result.Raise("Failed to create directory '%s' on"
                 " node %s" % (file_storage_dir, pnode))

  # Note: this needs to be kept in sync with adding of disks in
  # LUInstanceSetParams
  for idx, device in enumerate(instance.disks):
    if to_skip and idx in to_skip:
      continue
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    for node in all_nodes:
      f_create = node == pnode
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)


def _RemoveDisks(lu, instance, target_node=None):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type target_node: string
  @param target_node: used to override the node on which to remove the disks
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  for device in instance.disks:
    if target_node:
      edata = [(target_node, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node, disk in edata:
      lu.cfg.SetDiskID(disk, node)
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
      if msg:
        lu.LogWarning("Could not remove block device %s on node %s,"
                      " continuing anyway: %s", device.iv_name, node, msg)
        all_result = False

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if target_node:
      tgt = target_node
    else:
      tgt = instance.primary_node
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, instance.primary_node, result.fail_msg)
      all_result = False

  return all_result


def _ComputeDiskSizePerVG(disk_template, disks):
  """Compute disk size requirements in the volume group

  """
  def _compute(disks, payload):
    """Universal algorithm

    """
    vgs = {}
    for disk in disks:
      vgs[disk["vg"]] = vgs.get(disk["vg"], 0) + disk["size"] + payload

    return vgs
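    # For instance, _compute([{"vg": "xenvg", "size": 1024},
    #                         {"vg": "xenvg", "size": 512}], 128)
    # returns {"xenvg": 1792}, i.e. both disks plus 128 MB of overhead each.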

  # Required free disk space as a function of disk and swap space
  req_size_dict = {
    constants.DT_DISKLESS: {},
    constants.DT_PLAIN: _compute(disks, 0),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: _compute(disks, 128),
    constants.DT_FILE: {},
  }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" %  disk_template)

  return req_size_dict[disk_template]


def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group

  """
  # Required free disk space as a function of disk and swap space
  req_size_dict = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: sum(d["size"] for d in disks),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
    constants.DT_FILE: None,
  }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" %  disk_template)

  return req_size_dict[disk_template]


def _FilterVmNodes(lu, nodenames):
  """Filters out non-vm_capable nodes from a list.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @rtype: list
  @return: the list of vm-capable nodes

  """
  vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
  return [name for name in nodenames if name not in vm_nodes]


def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstracts the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  nodenames = _FilterVmNodes(lu, nodenames)
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
                                                  hvname,
                                                  hvparams)
  for node in nodenames:
    info = hvinfo[node]
    if info.offline:
      continue
    info.Raise("Hypervisor parameter validation failed on node %s" % node)


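# Typical call site for _CheckHVParams (see LUInstanceCreate.CheckPrereq
# below): _CheckHVParams(self, [pnode.name] + self.secondaries,
# self.op.hypervisor, self.op.hvparams).

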
def _CheckOSParams(lu, required, nodenames, osname, osparams):
  """OS parameters validation.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type required: boolean
  @param required: whether the validation should fail if the OS is not
      found
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type osname: string
  @param osname: the name of the OS we should use
  @type osparams: dict
  @param osparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  nodenames = _FilterVmNodes(lu, nodenames)
  result = lu.rpc.call_os_validate(required, nodenames, osname,
                                   [constants.OS_VALIDATE_PARAMETERS],
                                   osparams)
  for node, nres in result.items():
    # we don't check for offline cases since this should be run only
    # against the master node and/or an instance's nodes
    nres.Raise("OS Parameters validation failed on node %s" % node)
    if not nres.payload:
      lu.LogInfo("OS %s not found on node %s, validation skipped",
                 osname, node)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do ip check without a name check",
                                 errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)

    # check disks. parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if "adopt" in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

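    # Illustrative adoption request (volume names are hypothetical):
    # disks=[{"size": 1024, "adopt": "data0"}, {"size": 2048, "adopt": "data1"}]
    # is consistent, whereas mixing disks with and without "adopt" is rejected
    # above.
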
    # instance name verification
    if self.op.name_check:
      self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()

    ### Node/iallocator related checks
    _CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_NET_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    self._cds = _GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
          netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from an absolute"
                                     " path requires a source node option.",
                                     errors.ECODE_INVAL)
      else:
        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(constants.EXPORT_DIR, src_path)

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    nics = [n.ToDict() for n in self.nics]
    ial = IAllocator(self.cfg, self.rpc,
                     mode=constants.IALLOCATOR_MODE_ALLOC,
                     name=self.op.instance_name,
                     disk_template=self.op.disk_template,
                     tags=[],
                     os=self.op.os_type,
                     vcpus=self.be_full[constants.BE_VCPUS],
                     mem_size=self.be_full[constants.BE_MEMORY],
                     disks=self.disks,
                     nics=nics,
                     hypervisor=self.op.hypervisor,
                     )

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)
    if len(ial.result) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.result),
                                  ial.required_nodes), errors.ECODE_FAULT)
    self.op.pnode = ial.result[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))
    if ial.required_nodes == 2:
      self.op.snode = ial.result[1]

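  # Note: for net-mirrored disk templates the allocator is expected to return
  # two nodes (primary and secondary, hence required_nodes == 2 above); for
  # non-mirrored templates a single node suffices.
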
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(_BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=_NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d["size"], d["mode"]) for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
    ))

    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl

  def _ReadExportInfo(self):
7305
    """Reads the export information from disk.
7306

7307
    It will override the opcode source node and path with the actual
7308
    information, if these two were not specified before.
7309

7310
    @return: the export information
7311

7312
    """
7313
    assert self.op.mode == constants.INSTANCE_IMPORT
7314

    
7315
    src_node = self.op.src_node
7316
    src_path = self.op.src_path
7317

    
7318
    if src_node is None:
7319
      locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
7320
      exp_list = self.rpc.call_export_list(locked_nodes)
7321
      found = False
7322
      for node in exp_list:
7323
        if exp_list[node].fail_msg:
7324
          continue
7325
        if src_path in exp_list[node].payload:
7326
          found = True
7327
          self.op.src_node = src_node = node
7328
          self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
7329
                                                       src_path)
7330
          break
7331
      if not found:
7332
        raise errors.OpPrereqError("No export found for relative path %s" %
7333
                                    src_path, errors.ECODE_INVAL)
7334

    
7335
    _CheckNodeOnline(self, src_node)
7336
    result = self.rpc.call_export_info(src_node, src_path)
7337
    result.Raise("No export or invalid export found in dir %s" % src_path)
7338

    
7339
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
7340
    if not export_info.has_section(constants.INISECT_EXP):
7341
      raise errors.ProgrammerError("Corrupted export config",
7342
                                   errors.ECODE_ENVIRON)
7343

    
7344
    ei_version = export_info.get(constants.INISECT_EXP, "version")
7345
    if (int(ei_version) != constants.EXPORT_VERSION):
7346
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
7347
                                 (ei_version, constants.EXPORT_VERSION),
7348
                                 errors.ECODE_ENVIRON)
7349
    return export_info
7350

    
7351
  def _ReadExportParams(self, einfo):
7352
    """Use export parameters as defaults.
7353

7354
    In case the opcode doesn't specify (as in override) some instance
7355
    parameters, then try to use them from the export information, if
7356
    that declares them.
7357

7358
    """
7359
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
7360

    
7361
    if self.op.disk_template is None:
7362
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
7363
        self.op.disk_template = einfo.get(constants.INISECT_INS,
7364
                                          "disk_template")
7365
      else:
7366
        raise errors.OpPrereqError("No disk template specified and the export"
7367
                                   " is missing the disk_template information",
7368
                                   errors.ECODE_INVAL)
7369

    
7370
    if not self.op.disks:
7371
      if einfo.has_option(constants.INISECT_INS, "disk_count"):
7372
        disks = []
7373
        # TODO: import the disk iv_name too
7374
        for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
7375
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
7376
          disks.append({"size": disk_sz})
7377
        self.op.disks = disks
7378
      else:
7379
        raise errors.OpPrereqError("No disk info specified and the export"
7380
                                   " is missing the disk information",
7381
                                   errors.ECODE_INVAL)
7382

    
7383
    if (not self.op.nics and
7384
        einfo.has_option(constants.INISECT_INS, "nic_count")):
7385
      nics = []
7386
      for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
7387
        ndict = {}
7388
        for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
7389
          v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
7390
          ndict[name] = v
7391
        nics.append(ndict)
7392
      self.op.nics = nics
7393

    
7394
    if (self.op.hypervisor is None and
7395
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
7396
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
7397
    if einfo.has_section(constants.INISECT_HYP):
7398
      # use the export parameters but do not override the ones
7399
      # specified by the user
7400
      for name, value in einfo.items(constants.INISECT_HYP):
7401
        if name not in self.op.hvparams:
7402
          self.op.hvparams[name] = value
7403

    
7404
    if einfo.has_section(constants.INISECT_BEP):
7405
      # use the parameters, without overriding
7406
      for name, value in einfo.items(constants.INISECT_BEP):
7407
        if name not in self.op.beparams:
7408
          self.op.beparams[name] = value
7409
    else:
7410
      # try to read the parameters old style, from the main section
7411
      for name in constants.BES_PARAMETERS:
7412
        if (name not in self.op.beparams and
7413
            einfo.has_option(constants.INISECT_INS, name)):
7414
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
7415

    
7416
    if einfo.has_section(constants.INISECT_OSP):
7417
      # use the parameters, without overriding
7418
      for name, value in einfo.items(constants.INISECT_OSP):
7419
        if name not in self.op.osparams:
7420
          self.op.osparams[name] = value
7421

    
7422
  def _RevertToDefaults(self, cluster):
7423
    """Revert the instance parameters to the default values.
7424

7425
    """
7426
    # hvparams
7427
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
7428
    for name in self.op.hvparams.keys():
7429
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
7430
        del self.op.hvparams[name]
7431
    # beparams
7432
    be_defs = cluster.SimpleFillBE({})
7433
    for name in self.op.beparams.keys():
7434
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
7435
        del self.op.beparams[name]
7436
    # nic params
7437
    nic_defs = cluster.SimpleFillNIC({})
7438
    for nic in self.op.nics:
7439
      for name in constants.NICS_PARAMETERS:
7440
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
7441
          del nic[name]
7442
    # osparams
7443
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
7444
    for name in self.op.osparams.keys():
7445
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
7446
        del self.op.osparams[name]
7447

    
7448
  def _CalculateFileStorageDir(self):
7449
    """Calculate final instance file storage dir.
7450

7451
    """
7452
    # file storage dir calculation/check
7453
    self.instance_file_storage_dir = None
7454
    if self.op.disk_template == constants.DT_FILE:
7455
      # build the full file storage dir path
7456
      joinargs = []
7457

    
7458
      cfg_storagedir = self.cfg.GetFileStorageDir()
7459
      if not cfg_storagedir:
7460
        raise errors.OpPrereqError("Cluster file storage dir not defined")
7461
      joinargs.append(cfg_storagedir)
7462

    
7463
      if self.op.file_storage_dir is not None:
7464
        joinargs.append(self.op.file_storage_dir)
7465

    
7466
      joinargs.append(self.op.instance_name)
7467

    
7468
      # pylint: disable-msg=W0142
7469
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
7470

    
7471
  def CheckPrereq(self):
7472
    """Check prerequisites.
7473

7474
    """
7475
    self._CalculateFileStorageDir()
7476

    
7477
    if self.op.mode == constants.INSTANCE_IMPORT:
7478
      export_info = self._ReadExportInfo()
7479
      self._ReadExportParams(export_info)
7480

    
7481
    if (not self.cfg.GetVGName() and
7482
        self.op.disk_template not in constants.DTS_NOT_LVM):
7483
      raise errors.OpPrereqError("Cluster does not support lvm-based"
7484
                                 " instances", errors.ECODE_STATE)
7485

    
7486
    if self.op.hypervisor is None:
7487
      self.op.hypervisor = self.cfg.GetHypervisorType()
7488

    
7489
    cluster = self.cfg.GetClusterInfo()
7490
    enabled_hvs = cluster.enabled_hypervisors
7491
    if self.op.hypervisor not in enabled_hvs:
7492
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
7493
                                 " cluster (%s)" % (self.op.hypervisor,
7494
                                  ",".join(enabled_hvs)),
7495
                                 errors.ECODE_STATE)
7496

    
7497
    # check hypervisor parameter syntax (locally)
7498
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
7499
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
7500
                                      self.op.hvparams)
7501
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
7502
    hv_type.CheckParameterSyntax(filled_hvp)
7503
    self.hv_full = filled_hvp
7504
    # check that we don't specify global parameters on an instance
7505
    _CheckGlobalHvParams(self.op.hvparams)
7506

    
7507
    # fill and remember the beparams dict
7508
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
7509
    self.be_full = cluster.SimpleFillBE(self.op.beparams)
7510

    
7511
    # build os parameters
7512
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
7513

    
7514
    # now that hvp/bep are in final format, let's reset to defaults,
7515
    # if told to do so
7516
    if self.op.identify_defaults:
7517
      self._RevertToDefaults(cluster)
7518

    
7519
    # NIC buildup
7520
    self.nics = []
7521
    for idx, nic in enumerate(self.op.nics):
7522
      nic_mode_req = nic.get("mode", None)
7523
      nic_mode = nic_mode_req
7524
      if nic_mode is None:
7525
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
7526

    
7527
      # in routed mode, for the first nic, the default ip is 'auto'
7528
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
7529
        default_ip_mode = constants.VALUE_AUTO
7530
      else:
7531
        default_ip_mode = constants.VALUE_NONE
7532

    
7533
      # ip validity checks
7534
      ip = nic.get("ip", default_ip_mode)
7535
      if ip is None or ip.lower() == constants.VALUE_NONE:
7536
        nic_ip = None
7537
      elif ip.lower() == constants.VALUE_AUTO:
7538
        if not self.op.name_check:
7539
          raise errors.OpPrereqError("IP address set to auto but name checks"
7540
                                     " have been skipped",
7541
                                     errors.ECODE_INVAL)
7542
        nic_ip = self.hostname1.ip
7543
      else:
7544
        if not netutils.IPAddress.IsValid(ip):
7545
          raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
7546
                                     errors.ECODE_INVAL)
7547
        nic_ip = ip
7548

    
7549
      # TODO: check the ip address for uniqueness
7550
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
7551
        raise errors.OpPrereqError("Routed nic mode requires an ip address",
7552
                                   errors.ECODE_INVAL)
7553

    
7554
      # MAC address verification
7555
      mac = nic.get("mac", constants.VALUE_AUTO)
7556
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7557
        mac = utils.NormalizeAndValidateMac(mac)
7558

    
7559
        try:
7560
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
7561
        except errors.ReservationError:
7562
          raise errors.OpPrereqError("MAC address %s already in use"
7563
                                     " in cluster" % mac,
7564
                                     errors.ECODE_NOTUNIQUE)
7565

    
7566
      # bridge verification
7567
      bridge = nic.get("bridge", None)
7568
      link = nic.get("link", None)
7569
      if bridge and link:
7570
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
7571
                                   " at the same time", errors.ECODE_INVAL)
7572
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
7573
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
7574
                                   errors.ECODE_INVAL)
7575
      elif bridge:
7576
        link = bridge
7577

    
7578
      nicparams = {}
7579
      if nic_mode_req:
7580
        nicparams[constants.NIC_MODE] = nic_mode_req
7581
      if link:
7582
        nicparams[constants.NIC_LINK] = link
7583

    
7584
      check_params = cluster.SimpleFillNIC(nicparams)
7585
      objects.NIC.CheckParameterSyntax(check_params)
7586
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
7587

    
7588
    # disk checks/pre-build
7589
    self.disks = []
7590
    for disk in self.op.disks:
7591
      mode = disk.get("mode", constants.DISK_RDWR)
7592
      if mode not in constants.DISK_ACCESS_SET:
7593
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
7594
                                   mode, errors.ECODE_INVAL)
7595
      size = disk.get("size", None)
7596
      if size is None:
7597
        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
7598
      try:
7599
        size = int(size)
7600
      except (TypeError, ValueError):
7601
        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
7602
                                   errors.ECODE_INVAL)
7603
      data_vg = disk.get("vg", self.cfg.GetVGName())
7604
      meta_vg = disk.get("metavg", data_vg)
7605
      new_disk = {"size": size, "mode": mode, "vg": data_vg, "metavg": meta_vg}
7606
      if "adopt" in disk:
7607
        new_disk["adopt"] = disk["adopt"]
7608
      self.disks.append(new_disk)
7609

    
7610
    if self.op.mode == constants.INSTANCE_IMPORT:
7611

    
7612
      # Check that the new instance doesn't have less disks than the export
7613
      instance_disks = len(self.disks)
7614
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
7615
      if instance_disks < export_disks:
7616
        raise errors.OpPrereqError("Not enough disks to import."
7617
                                   " (instance: %d, export: %d)" %
7618
                                   (instance_disks, export_disks),
7619
                                   errors.ECODE_INVAL)
7620

    
7621
      disk_images = []
7622
      for idx in range(export_disks):
7623
        option = 'disk%d_dump' % idx
7624
        if export_info.has_option(constants.INISECT_INS, option):
7625
          # FIXME: are the old os-es, disk sizes, etc. useful?
7626
          export_name = export_info.get(constants.INISECT_INS, option)
7627
          image = utils.PathJoin(self.op.src_path, export_name)
7628
          disk_images.append(image)
7629
        else:
7630
          disk_images.append(False)
7631

    
7632
      self.src_images = disk_images
7633

    
7634
      old_name = export_info.get(constants.INISECT_INS, 'name')
7635
      try:
7636
        exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
7637
      except (TypeError, ValueError), err:
7638
        raise errors.OpPrereqError("Invalid export file, nic_count is not"
7639
                                   " an integer: %s" % str(err),
7640
                                   errors.ECODE_STATE)
7641
      if self.op.instance_name == old_name:
7642
        for idx, nic in enumerate(self.nics):
7643
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
7644
            nic_mac_ini = 'nic%d_mac' % idx
7645
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
7646

    
7647
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
7648

    
7649
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
7650
    if self.op.ip_check:
7651
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
7652
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
7653
                                   (self.check_ip, self.op.instance_name),
7654
                                   errors.ECODE_NOTUNIQUE)
7655

    
7656
    #### mac address generation
7657
    # By generating here the mac address both the allocator and the hooks get
7658
    # the real final mac address rather than the 'auto' or 'generate' value.
7659
    # There is a race condition between the generation and the instance object
7660
    # creation, which means that we know the mac is valid now, but we're not
7661
    # sure it will be when we actually add the instance. If things go bad
7662
    # adding the instance will abort because of a duplicate mac, and the
7663
    # creation job will fail.
7664
    for nic in self.nics:
7665
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7666
        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
7667

    
7668
    #### allocator run
7669

    
7670
    if self.op.iallocator is not None:
7671
      self._RunAllocator()
7672

    
7673
    #### node related checks
7674

    
7675
    # check primary node
7676
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
7677
    assert self.pnode is not None, \
7678
      "Cannot retrieve locked node %s" % self.op.pnode
7679
    if pnode.offline:
7680
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
7681
                                 pnode.name, errors.ECODE_STATE)
7682
    if pnode.drained:
7683
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
7684
                                 pnode.name, errors.ECODE_STATE)
7685
    if not pnode.vm_capable:
7686
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
7687
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
7688

    
7689
    self.secondaries = []
7690

    
7691
    # mirror node verification
7692
    if self.op.disk_template in constants.DTS_NET_MIRROR:
7693
      if self.op.snode == pnode.name:
7694
        raise errors.OpPrereqError("The secondary node cannot be the"
7695
                                   " primary node.", errors.ECODE_INVAL)
7696
      _CheckNodeOnline(self, self.op.snode)
7697
      _CheckNodeNotDrained(self, self.op.snode)
7698
      _CheckNodeVmCapable(self, self.op.snode)
7699
      self.secondaries.append(self.op.snode)
7700

    
7701
    nodenames = [pnode.name] + self.secondaries
7702

    
7703
    if not self.adopt_disks:
7704
      # Check lv size requirements, if not adopting
7705
      req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
7706
      _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
7707

    
7708
    else: # instead, we must check the adoption data
7709
      all_lvs = set([i["vg"] + "/" + i["adopt"] for i in self.disks])
7710
      if len(all_lvs) != len(self.disks):
7711
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
7712
                                   errors.ECODE_INVAL)
7713
      for lv_name in all_lvs:
7714
        try:
7715
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
7716
          # to ReserveLV uses the same syntax
7717
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
7718
        except errors.ReservationError:
7719
          raise errors.OpPrereqError("LV named %s used by another instance" %
7720
                                     lv_name, errors.ECODE_NOTUNIQUE)
7721

    
7722
      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
7723
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
7724

    
7725
      node_lvs = self.rpc.call_lv_list([pnode.name],
7726
                                       vg_names.payload.keys())[pnode.name]
7727
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
7728
      node_lvs = node_lvs.payload
7729

    
7730
      delta = all_lvs.difference(node_lvs.keys())
7731
      if delta:
7732
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
7733
                                   utils.CommaJoin(delta),
7734
                                   errors.ECODE_INVAL)
7735
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
7736
      if online_lvs:
7737
        raise errors.OpPrereqError("Online logical volumes found, cannot"
7738
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
7739
                                   errors.ECODE_STATE)
7740
      # update the size of disk based on what is found
7741
      for dsk in self.disks:
7742
        dsk["size"] = int(float(node_lvs[dsk["vg"] + "/" + dsk["adopt"]][0]))
7743

    
7744
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
7745

    
7746
    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
7747
    # check OS parameters (remotely)
7748
    _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
7749

    
7750
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
7751

    
7752
    # memory check on primary node
7753
    if self.op.start:
7754
      _CheckNodeFreeMemory(self, self.pnode.name,
7755
                           "creating instance %s" % self.op.instance_name,
7756
                           self.be_full[constants.BE_MEMORY],
7757
                           self.op.hypervisor)
7758

    
7759
    self.dry_run_result = list(nodenames)
7760

    
7761
  def Exec(self, feedback_fn):
7762
    """Create and add the instance to the cluster.
7763

7764
    """
7765
    instance = self.op.instance_name
7766
    pnode_name = self.pnode.name
7767

    
7768
    ht_kind = self.op.hypervisor
7769
    if ht_kind in constants.HTS_REQ_PORT:
7770
      network_port = self.cfg.AllocatePort()
7771
    else:
7772
      network_port = None
7773

    
7774
    disks = _GenerateDiskTemplate(self,
7775
                                  self.op.disk_template,
7776
                                  instance, pnode_name,
7777
                                  self.secondaries,
7778
                                  self.disks,
7779
                                  self.instance_file_storage_dir,
7780
                                  self.op.file_driver,
7781
                                  0,
7782
                                  feedback_fn)
7783

    
7784
    iobj = objects.Instance(name=instance, os=self.op.os_type,
7785
                            primary_node=pnode_name,
7786
                            nics=self.nics, disks=disks,
7787
                            disk_template=self.op.disk_template,
7788
                            admin_up=False,
7789
                            network_port=network_port,
7790
                            beparams=self.op.beparams,
7791
                            hvparams=self.op.hvparams,
7792
                            hypervisor=self.op.hypervisor,
7793
                            osparams=self.op.osparams,
7794
                            )
7795

    
7796
    if self.adopt_disks:
7797
      # rename LVs to the newly-generated names; we need to construct
7798
      # 'fake' LV disks with the old data, plus the new unique_id
7799
      tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
7800
      rename_to = []
7801
      for t_dsk, a_dsk in zip (tmp_disks, self.disks):
7802
        rename_to.append(t_dsk.logical_id)
7803
        t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
7804
        self.cfg.SetDiskID(t_dsk, pnode_name)
7805
      result = self.rpc.call_blockdev_rename(pnode_name,
7806
                                             zip(tmp_disks, rename_to))
7807
      result.Raise("Failed to rename adopted LVs")
7808
    else:
7809
      feedback_fn("* creating instance disks...")
7810
      try:
7811
        _CreateDisks(self, iobj)
7812
      except errors.OpExecError:
7813
        self.LogWarning("Device creation failed, reverting...")
7814
        try:
7815
          _RemoveDisks(self, iobj)
7816
        finally:
7817
          self.cfg.ReleaseDRBDMinors(instance)
7818
          raise
7819

    
7820
    feedback_fn("adding instance %s to cluster config" % instance)
7821

    
7822
    self.cfg.AddInstance(iobj, self.proc.GetECId())
7823

    
7824
    # Declare that we don't want to remove the instance lock anymore, as we've
7825
    # added the instance to the config
7826
    del self.remove_locks[locking.LEVEL_INSTANCE]
7827
    # Unlock all the nodes
7828
    if self.op.mode == constants.INSTANCE_IMPORT:
7829
      nodes_keep = [self.op.src_node]
7830
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
7831
                       if node != self.op.src_node]
7832
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
7833
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
7834
    else:
7835
      self.context.glm.release(locking.LEVEL_NODE)
7836
      del self.acquired_locks[locking.LEVEL_NODE]
7837

    
7838
    disk_abort = False
7839
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
7840
      feedback_fn("* wiping instance disks...")
7841
      try:
7842
        _WipeDisks(self, iobj)
7843
      except errors.OpExecError, err:
7844
        logging.exception("Wiping disks failed")
7845
        self.LogWarning("Wiping instance disks failed (%s)", err)
7846
        disk_abort = True
7847

    
7848
    if disk_abort:
7849
      # Something is already wrong with the disks, don't do anything else
7850
      pass
7851
    elif self.op.wait_for_sync:
7852
      disk_abort = not _WaitForSync(self, iobj)
7853
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
7854
      # make sure the disks are not degraded (still sync-ing is ok)
7855
      time.sleep(15)
7856
      feedback_fn("* checking mirrors status")
7857
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
7858
    else:
7859
      disk_abort = False
7860

    
7861
    if disk_abort:
7862
      _RemoveDisks(self, iobj)
7863
      self.cfg.RemoveInstance(iobj.name)
7864
      # Make sure the instance lock gets removed
7865
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
7866
      raise errors.OpExecError("There are some degraded disks for"
7867
                               " this instance")
7868

    
7869
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
7870
      if self.op.mode == constants.INSTANCE_CREATE:
7871
        if not self.op.no_install:
7872
          feedback_fn("* running the instance OS create scripts...")
7873
          # FIXME: pass debug option from opcode to backend
7874
          result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
7875
                                                 self.op.debug_level)
7876
          result.Raise("Could not add os for instance %s"
7877
                       " on node %s" % (instance, pnode_name))
7878

    
7879
      elif self.op.mode == constants.INSTANCE_IMPORT:
7880
        feedback_fn("* running the instance OS import scripts...")
7881

    
7882
        transfers = []
7883

    
7884
        for idx, image in enumerate(self.src_images):
7885
          if not image:
7886
            continue
7887

    
7888
          # FIXME: pass debug option from opcode to backend
7889
          dt = masterd.instance.DiskTransfer("disk/%s" % idx,
7890
                                             constants.IEIO_FILE, (image, ),
7891
                                             constants.IEIO_SCRIPT,
7892
                                             (iobj.disks[idx], idx),
7893
                                             None)
7894
          transfers.append(dt)
7895

    
7896
        import_result = \
7897
          masterd.instance.TransferInstanceData(self, feedback_fn,
7898
                                                self.op.src_node, pnode_name,
7899
                                                self.pnode.secondary_ip,
7900
                                                iobj, transfers)
7901
        if not compat.all(import_result):
7902
          self.LogWarning("Some disks for instance %s on node %s were not"
7903
                          " imported successfully" % (instance, pnode_name))
7904

    
7905
      elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
7906
        feedback_fn("* preparing remote import...")
7907
        # The source cluster will stop the instance before attempting to make a
7908
        # connection. In some cases stopping an instance can take a long time,
7909
        # hence the shutdown timeout is added to the connection timeout.
7910
        connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
7911
                           self.op.source_shutdown_timeout)
7912
        timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
7913

    
7914
        assert iobj.primary_node == self.pnode.name
7915
        disk_results = \
7916
          masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
7917
                                        self.source_x509_ca,
7918
                                        self._cds, timeouts)
7919
        if not compat.all(disk_results):
7920
          # TODO: Should the instance still be started, even if some disks
7921
          # failed to import (valid for local imports, too)?
7922
          self.LogWarning("Some disks for instance %s on node %s were not"
7923
                          " imported successfully" % (instance, pnode_name))
7924

    
7925
        # Run rename script on newly imported instance
7926
        assert iobj.name == instance
7927
        feedback_fn("Running rename script for %s" % instance)
7928
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
7929
                                                   self.source_instance_name,
7930
                                                   self.op.debug_level)
7931
        if result.fail_msg:
7932
          self.LogWarning("Failed to run rename script for %s on node"
7933
                          " %s: %s" % (instance, pnode_name, result.fail_msg))
7934

    
7935
      else:
7936
        # also checked in the prereq part
7937
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
7938
                                     % self.op.mode)
7939

    
7940
    if self.op.start:
7941
      iobj.admin_up = True
7942
      self.cfg.Update(iobj, feedback_fn)
7943
      logging.info("Starting instance %s on node %s", instance, pnode_name)
7944
      feedback_fn("* starting instance...")
7945
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
7946
      result.Raise("Could not start instance")
7947

    
7948
    return list(iobj.all_nodes)
7949

    
7950

    
7951
class LUInstanceConsole(NoHooksLU):
7952
  """Connect to an instance's console.
7953

7954
  This is somewhat special in that it returns the command line that
7955
  you need to run on the master node in order to connect to the
7956
  console.
7957

7958
  """
7959
  REQ_BGL = False
7960

    
7961
  def ExpandNames(self):
7962
    self._ExpandAndLockInstance()
7963

    
7964
  def CheckPrereq(self):
7965
    """Check prerequisites.
7966

7967
    This checks that the instance is in the cluster.
7968

7969
    """
7970
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7971
    assert self.instance is not None, \
7972
      "Cannot retrieve locked instance %s" % self.op.instance_name
7973
    _CheckNodeOnline(self, self.instance.primary_node)
7974

    
7975
  def Exec(self, feedback_fn):
7976
    """Connect to the console of an instance
7977

7978
    """
7979
    instance = self.instance
7980
    node = instance.primary_node
7981

    
7982
    node_insts = self.rpc.call_instance_list([node],
7983
                                             [instance.hypervisor])[node]
7984
    node_insts.Raise("Can't get node information from %s" % node)
7985

    
7986
    if instance.name not in node_insts.payload:
7987
      if instance.admin_up:
7988
        state = "ERROR_down"
7989
      else:
7990
        state = "ADMIN_down"
7991
      raise errors.OpExecError("Instance %s is not running (state %s)" %
7992
                               (instance.name, state))
7993

    
7994
    logging.debug("Connecting to console of %s on %s", instance.name, node)
7995

    
7996
    return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
7997

    
7998

    
7999
def _GetInstanceConsole(cluster, instance):
8000
  """Returns console information for an instance.
8001

8002
  @type cluster: L{objects.Cluster}
8003
  @type instance: L{objects.Instance}
8004
  @rtype: dict
8005

8006
  """
8007
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
8008
  # beparams and hvparams are passed separately, to avoid editing the
8009
  # instance and then saving the defaults in the instance itself.
8010
  hvparams = cluster.FillHV(instance)
8011
  beparams = cluster.FillBE(instance)
8012
  console = hyper.GetInstanceConsole(instance, hvparams, beparams)
8013

    
8014
  assert console.instance == instance.name
8015
  assert console.Validate()
8016

    
8017
  return console.ToDict()
8018

    
8019

    
8020
class LUInstanceReplaceDisks(LogicalUnit):
8021
  """Replace the disks of an instance.
8022

8023
  """
8024
  HPATH = "mirrors-replace"
8025
  HTYPE = constants.HTYPE_INSTANCE
8026
  REQ_BGL = False
8027

    
8028
  def CheckArguments(self):
8029
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
8030
                                  self.op.iallocator)
8031

    
8032
  def ExpandNames(self):
8033
    self._ExpandAndLockInstance()
8034

    
8035
    if self.op.iallocator is not None:
8036
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8037

    
8038
    elif self.op.remote_node is not None:
8039
      remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8040
      self.op.remote_node = remote_node
8041

    
8042
      # Warning: do not remove the locking of the new secondary here
8043
      # unless DRBD8.AddChildren is changed to work in parallel;
8044
      # currently it doesn't since parallel invocations of
8045
      # FindUnusedMinor will conflict
8046
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
8047
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
8048

    
8049
    else:
8050
      self.needed_locks[locking.LEVEL_NODE] = []
8051
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8052

    
8053
    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
8054
                                   self.op.iallocator, self.op.remote_node,
8055
                                   self.op.disks, False, self.op.early_release)
8056

    
8057
    self.tasklets = [self.replacer]
8058

    
8059
  def DeclareLocks(self, level):
8060
    # If we're not already locking all nodes in the set we have to declare the
8061
    # instance's primary/secondary nodes.
8062
    if (level == locking.LEVEL_NODE and
8063
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
8064
      self._LockInstancesNodes()
8065

    
8066
  def BuildHooksEnv(self):
8067
    """Build hooks env.
8068

8069
    This runs on the master, the primary and all the secondaries.
8070

8071
    """
8072
    instance = self.replacer.instance
8073
    env = {
8074
      "MODE": self.op.mode,
8075
      "NEW_SECONDARY": self.op.remote_node,
8076
      "OLD_SECONDARY": instance.secondary_nodes[0],
8077
      }
8078
    env.update(_BuildInstanceHookEnvByObject(self, instance))
8079
    nl = [
8080
      self.cfg.GetMasterNode(),
8081
      instance.primary_node,
8082
      ]
8083
    if self.op.remote_node is not None:
8084
      nl.append(self.op.remote_node)
8085
    return env, nl, nl
8086

    
8087

    
8088
class TLReplaceDisks(Tasklet):
8089
  """Replaces disks for an instance.
8090

8091
  Note: Locking is not within the scope of this class.
8092

8093
  """
8094
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
8095
               disks, delay_iallocator, early_release):
8096
    """Initializes this class.
8097

8098
    """
8099
    Tasklet.__init__(self, lu)
8100

    
8101
    # Parameters
8102
    self.instance_name = instance_name
8103
    self.mode = mode
8104
    self.iallocator_name = iallocator_name
8105
    self.remote_node = remote_node
8106
    self.disks = disks
8107
    self.delay_iallocator = delay_iallocator
8108
    self.early_release = early_release
8109

    
8110
    # Runtime data
8111
    self.instance = None
8112
    self.new_node = None
8113
    self.target_node = None
8114
    self.other_node = None
8115
    self.remote_node_info = None
8116
    self.node_secondary_ip = None
8117

    
8118
  @staticmethod
8119
  def CheckArguments(mode, remote_node, iallocator):
8120
    """Helper function for users of this class.
8121

8122
    """
8123
    # check for valid parameter combination
8124
    if mode == constants.REPLACE_DISK_CHG:
8125
      if remote_node is None and iallocator is None:
8126
        raise errors.OpPrereqError("When changing the secondary either an"
8127
                                   " iallocator script must be used or the"
8128
                                   " new node given", errors.ECODE_INVAL)
8129

    
8130
      if remote_node is not None and iallocator is not None:
8131
        raise errors.OpPrereqError("Give either the iallocator or the new"
8132
                                   " secondary, not both", errors.ECODE_INVAL)
8133

    
8134
    elif remote_node is not None or iallocator is not None:
8135
      # Not replacing the secondary
8136
      raise errors.OpPrereqError("The iallocator and new node options can"
8137
                                 " only be used when changing the"
8138
                                 " secondary node", errors.ECODE_INVAL)
8139

    
8140
  @staticmethod
8141
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
8142
    """Compute a new secondary node using an IAllocator.
8143

8144
    """
8145
    ial = IAllocator(lu.cfg, lu.rpc,
8146
                     mode=constants.IALLOCATOR_MODE_RELOC,
8147
                     name=instance_name,
8148
                     relocate_from=relocate_from)
8149

    
8150
    ial.Run(iallocator_name)
8151

    
8152
    if not ial.success:
8153
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
8154
                                 " %s" % (iallocator_name, ial.info),
8155
                                 errors.ECODE_NORES)
8156

    
8157
    if len(ial.result) != ial.required_nodes:
8158
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
8159
                                 " of nodes (%s), required %s" %
8160
                                 (iallocator_name,
8161
                                  len(ial.result), ial.required_nodes),
8162
                                 errors.ECODE_FAULT)
8163

    
8164
    remote_node_name = ial.result[0]
8165

    
8166
    lu.LogInfo("Selected new secondary for instance '%s': %s",
8167
               instance_name, remote_node_name)
8168

    
8169
    return remote_node_name
8170

    
8171
  def _FindFaultyDisks(self, node_name):
8172
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
8173
                                    node_name, True)
8174

    
8175
  def CheckPrereq(self):
8176
    """Check prerequisites.
8177

8178
    This checks that the instance is in the cluster.
8179

8180
    """
8181
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
8182
    assert instance is not None, \
8183
      "Cannot retrieve locked instance %s" % self.instance_name
8184

    
8185
    if instance.disk_template != constants.DT_DRBD8:
8186
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
8187
                                 " instances", errors.ECODE_INVAL)
8188

    
8189
    if len(instance.secondary_nodes) != 1:
8190
      raise errors.OpPrereqError("The instance has a strange layout,"
8191
                                 " expected one secondary but found %d" %
8192
                                 len(instance.secondary_nodes),
8193
                                 errors.ECODE_FAULT)
8194

    
8195
    if not self.delay_iallocator:
8196
      self._CheckPrereq2()
8197

    
8198
  def _CheckPrereq2(self):
    """Check prerequisites, second part.

    This function should always be part of CheckPrereq. It was separated and is
    now called from Exec because during node evacuation iallocator was only
    called with an unmodified cluster model, not taking planned changes into
    account.

    """
    instance = self.instance
    secondary_node = instance.secondary_nodes[0]

    if self.iallocator_name is None:
      remote_node = self.remote_node
    else:
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
                                       instance.name, instance.secondary_nodes)

    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None

    if remote_node == self.instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.", errors.ECODE_INVAL)

    if remote_node == secondary_node:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance.",
                                 errors.ECODE_INVAL)

    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
                                    constants.REPLACE_DISK_CHG):
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
                                 errors.ECODE_INVAL)

    if self.mode == constants.REPLACE_DISK_AUTO:
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
      faulty_secondary = self._FindFaultyDisks(secondary_node)

      if faulty_primary and faulty_secondary:
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
                                   " one node and can not be repaired"
                                   " automatically" % self.instance_name,
                                   errors.ECODE_STATE)

      if faulty_primary:
        self.disks = faulty_primary
        self.target_node = instance.primary_node
        self.other_node = secondary_node
        check_nodes = [self.target_node, self.other_node]
      elif faulty_secondary:
        self.disks = faulty_secondary
        self.target_node = secondary_node
        self.other_node = instance.primary_node
        check_nodes = [self.target_node, self.other_node]
      else:
        self.disks = []
        check_nodes = []

    else:
      # Non-automatic modes
      if self.mode == constants.REPLACE_DISK_PRI:
        self.target_node = instance.primary_node
        self.other_node = secondary_node
        check_nodes = [self.target_node, self.other_node]

      elif self.mode == constants.REPLACE_DISK_SEC:
        self.target_node = secondary_node
        self.other_node = instance.primary_node
        check_nodes = [self.target_node, self.other_node]

      elif self.mode == constants.REPLACE_DISK_CHG:
        self.new_node = remote_node
        self.other_node = instance.primary_node
        self.target_node = secondary_node
        check_nodes = [self.new_node, self.other_node]

        _CheckNodeNotDrained(self.lu, remote_node)
        _CheckNodeVmCapable(self.lu, remote_node)

        old_node_info = self.cfg.GetNodeInfo(secondary_node)
        assert old_node_info is not None
        if old_node_info.offline and not self.early_release:
          # doesn't make sense to delay the release
          self.early_release = True
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
                          " early-release mode", secondary_node)

      else:
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
                                     self.mode)

      # If not specified all disks should be replaced
      if not self.disks:
        self.disks = range(len(self.instance.disks))

    for node in check_nodes:
      _CheckNodeOnline(self.lu, node)

    touched_nodes = frozenset([self.new_node, self.other_node,
                               self.target_node])

    if self.lu.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # Release unneeded node locks
      for name in self.lu.acquired_locks[locking.LEVEL_NODE]:
        if name not in touched_nodes:
          self._ReleaseNodeLock(name)

    # Check whether disks are valid
    for disk_idx in self.disks:
      instance.FindDisk(disk_idx)

    # Get secondary node IP addresses
    self.node_secondary_ip = \
      dict((node_name, self.cfg.GetNodeInfo(node_name).secondary_ip)
           for node_name in touched_nodes
           if node_name is not None)

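  # Note (summary added for clarity): after _CheckPrereq2 the node roles are
  # expected to be:
  #   - REPLACE_DISK_PRI:  target_node = primary,   other_node = secondary
  #   - REPLACE_DISK_SEC:  target_node = secondary, other_node = primary
  #   - REPLACE_DISK_CHG:  target_node = secondary, other_node = primary,
  #                        new_node = the given or iallocator-chosen node
  #   - REPLACE_DISK_AUTO: roles depend on which side has faulty disks
  # Exec() below then picks _ExecDrbd8Secondary when new_node is set and
  # _ExecDrbd8DiskOnly otherwise.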
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    if self.delay_iallocator:
      self._CheckPrereq2()

    if (self.lu.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET and
        __debug__):
      # Verify owned locks before starting operation
      owned_locks = self.lu.context.glm.list_owned(locking.LEVEL_NODE)
      assert set(owned_locks) == set(self.node_secondary_ip), \
          "Not owning the correct locks: %s" % (owned_locks, )

    if not self.disks:
      feedback_fn("No disks need replacement")
      return

    feedback_fn("Replacing disk(s) %s for %s" %
                (utils.CommaJoin(self.disks), self.instance.name))

    activate_disks = (not self.instance.admin_up)

    # Activate the instance disks if we're replacing them on a down instance
    if activate_disks:
      _StartInstanceDisks(self.lu, self.instance, True)

    try:
      # Should we replace the secondary node?
      if self.new_node is not None:
        fn = self._ExecDrbd8Secondary
      else:
        fn = self._ExecDrbd8DiskOnly

      result = fn(feedback_fn)
    finally:
      # Deactivate the instance disks if we're replacing them on a
      # down instance
      if activate_disks:
        _SafeShutdownInstanceDisks(self.lu, self.instance)

    if __debug__:
      # Verify owned locks
      owned_locks = self.lu.context.glm.list_owned(locking.LEVEL_NODE)
      assert ((self.early_release and not owned_locks) or
              (not self.early_release and
               set(owned_locks) == set(self.node_secondary_ip))), \
        ("Not owning the correct locks, early_release=%s, owned=%r" %
         (self.early_release, owned_locks))

    return result

  def _CheckVolumeGroup(self, nodes):
    self.lu.LogInfo("Checking volume groups")

    vgname = self.cfg.GetVGName()

    # Make sure volume group exists on all involved nodes
    results = self.rpc.call_vg_list(nodes)
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")

    for node in nodes:
      res = results[node]
      res.Raise("Error checking node %s" % node)
      if vgname not in res.payload:
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
                                 (vgname, node))

  def _CheckDisksExistence(self, nodes):
    # Check disk existence
    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      for node in nodes:
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
        self.cfg.SetDiskID(dev, node)

        result = self.rpc.call_blockdev_find(node, dev)

        msg = result.fail_msg
        if msg or not result.payload:
          if not msg:
            msg = "disk not found"
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                   (idx, node, msg))

  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
                      (idx, node_name))

      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
                                   ldisk=ldisk):
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
                                 " replace disks for instance %s" %
                                 (node_name, self.instance.name))

  def _CreateNewStorage(self, node_name):
    """Create new storage on the primary or secondary node.

    This is only used for same-node replaces, not for changing the
    secondary node, hence we don't want to modify the existing disk.

    """
    iv_names = {}

    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))

      self.cfg.SetDiskID(dev, node_name)

      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
      names = _GenerateUniqueNames(self.lu, lv_names)

      vg_data = dev.children[0].logical_id[0]
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
                             logical_id=(vg_data, names[0]))
      vg_meta = dev.children[1].logical_id[0]
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vg_meta, names[1]))

      new_lvs = [lv_data, lv_meta]
      old_lvs = [child.Copy() for child in dev.children]
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)

      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
                        _GetInstanceInfoText(self.instance), False)

    return iv_names

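  # Note: the iv_names mapping built by _CreateNewStorage above (and consumed
  # by _CheckDevices and _RemoveOldStorage below) has the shape
  #   {iv_name: (drbd_dev, old_lvs, new_lvs)}
  # where old_lvs are copies of the original LV children and new_lvs are the
  # freshly created data/meta volumes.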
  def _CheckDevices(self, node_name, iv_names):
    for name, (dev, _, _) in iv_names.iteritems():
      self.cfg.SetDiskID(dev, node_name)

      result = self.rpc.call_blockdev_find(node_name, dev)

      msg = result.fail_msg
      if msg or not result.payload:
        if not msg:
          msg = "disk not found"
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
                                 (name, msg))

      if result.payload.is_degraded:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

  def _RemoveOldStorage(self, node_name, iv_names):
    for name, (_, old_lvs, _) in iv_names.iteritems():
      self.lu.LogInfo("Remove logical volumes for %s" % name)

      for lv in old_lvs:
        self.cfg.SetDiskID(lv, node_name)

        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
        if msg:
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
                             hint="remove unused LVs manually")

  def _ReleaseNodeLock(self, node_name):
    """Releases the lock for a given node."""
    self.lu.context.glm.release(locking.LEVEL_NODE, node_name)

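  # Note: both replacement paths below (_ExecDrbd8DiskOnly and
  # _ExecDrbd8Secondary) follow the same six-step skeleton reported via
  # LogStep: check device existence, check peer consistency, allocate new
  # storage, change the drbd configuration, wait for sync and remove the old
  # storage (early or late, depending on early_release).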
  def _ExecDrbd8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for DRBD 8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaced.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.other_node, self.target_node])
    self._CheckVolumeGroup([self.target_node, self.other_node])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(self.other_node,
                                self.other_node == self.instance.primary_node,
                                False)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    iv_names = self._CreateNewStorage(self.target_node)

    # Step: for each lv, detach+rename*2+attach
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)

      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
                                                     old_lvs)
      result.Raise("Can't detach drbd from local storage on node"
                   " %s for device %s" % (self.target_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)

      # Build the rename list based on what LVs exist on the node
      rename_old_to_new = []
      for to_ren in old_lvs:
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
        if not result.fail_msg and result.payload:
          # device exists
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))

      self.lu.LogInfo("Renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_old_to_new)
      result.Raise("Can't rename old LVs on node %s" % self.target_node)

      # Now we rename the new LVs to the old LVs
      self.lu.LogInfo("Renaming the new LVs on the target node")
      rename_new_to_old = [(new, old.physical_id)
                           for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_new_to_old)
      result.Raise("Can't rename new LVs on node %s" % self.target_node)

      # Intermediate steps of in memory modifications
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        self.cfg.SetDiskID(new, self.target_node)

      # We need to modify old_lvs so that removal later removes the
      # right LVs, not the newly added ones; note that old_lvs is a
      # copy here
      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        self.cfg.SetDiskID(disk, self.target_node)

      # Now that the new lvs have the old name, we can add them to the device
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
                                                  new_lvs)
      msg = result.fail_msg
      if msg:
        for new_lv in new_lvs:
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
                                               new_lv).fail_msg
          if msg2:
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
                               hint=("cleanup manually the unused logical"
                                     " volumes"))
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)

    cstep = 5
    if self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)
      # WARNING: we release both node locks here, do not do other RPCs
      # than WaitForSync to the primary node
      self._ReleaseNodeLock([self.target_node, self.other_node])

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep, steps_total, "Sync devices")
    cstep += 1
    _WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)

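  # Note: a DRBD8 disk's logical_id is the 6-tuple
  #   (node_a, node_b, port, minor_a, minor_b, secret)
  # as unpacked in _ExecDrbd8Secondary below. The device on the new secondary
  # is first created with a "standalone" id (port=None) and only later
  # attached with the fully networked id, so the primary can reconnect to it.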
  def _ExecDrbd8Secondary(self, feedback_fn):
    """Replace the secondary node for DRBD 8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.instance.primary_node])
    self._CheckVolumeGroup([self.instance.primary_node])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(self.instance.primary_node, True, True)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
                      (self.new_node, idx))
      # we pass force_create=True to force LVM creation
      for new_lv in dev.children:
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
                        _GetInstanceInfoText(self.instance), False)

    # Step 4: drbd minors and drbd setup changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    minors = self.cfg.AllocateDRBDMinor([self.new_node
                                         for dev in self.instance.disks],
                                        self.instance.name)
    logging.debug("Allocated minors %r", minors)

    iv_names = {}
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
                      (self.new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the later activation in step 4
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if self.instance.primary_node == o_node1:
        p_minor = o_minor1
      else:
        assert self.instance.primary_node == o_node2, "Three-node instance?"
        p_minor = o_minor2

      new_alone_id = (self.instance.primary_node, self.new_node, None,
                      p_minor, new_minor, o_secret)
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
                    p_minor, new_minor, o_secret)

      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children,
                              size=dev.size)
      try:
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
                              _GetInstanceInfoText(self.instance), False)
      except errors.GenericError:
        self.cfg.ReleaseDRBDMinors(self.instance.name)
        raise

    # We have new devices, shutdown the drbd on the old secondary
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
      self.cfg.SetDiskID(dev, self.target_node)
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
      if msg:
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
                           " node: %s" % (idx, msg),
                           hint=("Please cleanup this device manually as"
                                 " soon as possible"))

    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
                                               self.node_secondary_ip,
                                               self.instance.disks)\
                                              [self.instance.primary_node]

    msg = result.fail_msg
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(self.instance.name)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    self.lu.LogInfo("Updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      self.cfg.SetDiskID(dev, self.instance.primary_node)

    self.cfg.Update(self.instance, feedback_fn)

    # and now perform the drbd attach
    self.lu.LogInfo("Attaching primary drbds to new secondary"
                    " (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                            self.new_node],
                                           self.node_secondary_ip,
                                           self.instance.disks,
                                           self.instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
                           to_node, msg,
                           hint=("please do a gnt-instance info to see the"
                                 " status of disks"))
    cstep = 5
    if self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)
      # WARNING: we release all node locks here, do not do other RPCs
      # than WaitForSync to the primary node
      self._ReleaseNodeLock([self.instance.primary_node,
                             self.target_node,
                             self.new_node])

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep, steps_total, "Sync devices")
    cstep += 1
    _WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      self._RemoveOldStorage(self.target_node, iv_names)


class LURepairNodeStorage(NoHooksLU):
  """Repairs the volume group on a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type

    if (constants.SO_FIX_CONSISTENCY not in
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " repaired" % storage_type,
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

  def _CheckFaultyDisks(self, instance, node_name):
    """Ensure faulty disks abort the opcode or at least warn."""
    try:
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
                                  node_name, True):
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
                                   " node '%s'" % (instance.name, node_name),
                                   errors.ECODE_STATE)
    except errors.OpPrereqError, err:
      if self.op.ignore_consistency:
        self.proc.LogWarning(str(err.args[0]))
      else:
        raise

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # Check whether any instance on this node has faulty disks
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
      if not inst.admin_up:
        continue
      check_nodes = set(inst.all_nodes)
      check_nodes.discard(self.op.node_name)
      for inst_node_name in check_nodes:
        self._CheckFaultyDisks(inst, inst_node_name)

  def Exec(self, feedback_fn):
    feedback_fn("Repairing storage unit '%s' on %s ..." %
                (self.op.name, self.op.node_name))

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_execute(self.op.node_name,
                                           self.op.storage_type, st_args,
                                           self.op.name,
                                           constants.SO_FIX_CONSISTENCY)
    result.Raise("Failed to repair storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))


class LUNodeEvacStrategy(NoHooksLU):
  """Computes the node evacuation strategy.

  """
  REQ_BGL = False

  def CheckArguments(self):
    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")

  def ExpandNames(self):
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
    self.needed_locks = locks = {}
    if self.op.remote_node is None:
      locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
      locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]

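  # Note: Exec returns a list of [instance_name, new_secondary_node] pairs,
  # either built directly from the requested remote_node or taken from the
  # iallocator's IALLOCATOR_MODE_MEVAC result.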
  def Exec(self, feedback_fn):
    instances = []
    for node in self.op.nodes:
      instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
    if not instances:
      return []

    if self.op.remote_node is not None:
      result = []
      for i in instances:
        if i.primary_node == self.op.remote_node:
          raise errors.OpPrereqError("Node %s is the primary node of"
                                     " instance %s, cannot use it as"
                                     " secondary" %
                                     (self.op.remote_node, i.name),
                                     errors.ECODE_INVAL)
        result.append([i.name, self.op.remote_node])
    else:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=constants.IALLOCATOR_MODE_MEVAC,
                       evac_nodes=self.op.nodes)
      ial.Run(self.op.iallocator, validate=True)
      if not ial.success:
        raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
                                 errors.ECODE_NORES)
      result = ial.result
    return result


class LUInstanceGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)

    self.instance = instance

    if instance.disk_template not in constants.DTS_GROWABLE:
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.", errors.ECODE_INVAL)

    self.disk = instance.FindDisk(self.op.disk)

    if instance.disk_template != constants.DT_FILE:
      # TODO: check the free disk space for file, when that feature
      # will be supported
      _CheckNodesFreeDiskPerVG(self, nodenames,
                               self.disk.ComputeGrowth(self.op.amount))

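  # Note: the grow operation below assembles the disk, issues
  # call_blockdev_grow on every node holding the disk, records the new size
  # in the configuration via RecordGrow/Update, and then optionally waits for
  # the mirror to resync when wait_for_sync is requested.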
  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = self.disk

    disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block device to grow")

    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      result.Raise("Grow request failed to node %s" % node)

      # TODO: Rewrite code to work properly
      # DRBD goes into sync mode for a short amount of time after executing the
      # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
      # calling "resize" in sync mode fails. Sleeping for a short amount of
      # time is a work-around.
      time.sleep(5)

    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance, feedback_fn)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance, disks=[disk])
      if disk_abort:
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")
      if not instance.admin_up:
        _SafeShutdownInstanceDisks(self, instance, disks=[disk])
    elif not instance.admin_up:
      self.proc.LogWarning("Not shutting down the disk even if the instance is"
                           " not supposed to be running because no wait for"
                           " sync mode was requested.")


class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      self.wanted_names = _GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = dict.fromkeys(locking.LEVELS, 1)

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODE] = []
      self.share_locks = dict.fromkeys(locking.LEVELS, 1)
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if self.op.use_locking and level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name)
                             for name in self.wanted_names]

  def _ComputeBlockdevStatus(self, node, instance_name, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node:
      return None

    self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_find(node, dev)
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance_name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

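  # Note: the per-device status tuple returned by _ComputeBlockdevStatus above
  # is (dev_path, major, minor, sync_percent, estimated_time, is_degraded,
  # ldisk_status); _ComputeDiskStatus below embeds it as "pstatus"/"sstatus"
  # in the dict returned for each disk.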
  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    """
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance.name, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise("Error checking node %s" % instance.primary_node)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      result[instance.name] = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": _NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

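  # Note: self.op.disks and self.op.nics are lists of (op, params) pairs,
  # where op is constants.DDM_ADD, constants.DDM_REMOVE or the integer index
  # of an existing device; CheckArguments below validates exactly this
  # structure.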
  def CheckArguments(self):
9163
    if not (self.op.nics or self.op.disks or self.op.disk_template or
9164
            self.op.hvparams or self.op.beparams or self.op.os_name):
9165
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
9166

    
9167
    if self.op.hvparams:
9168
      _CheckGlobalHvParams(self.op.hvparams)
9169

    
9170
    # Disk validation
9171
    disk_addremove = 0
9172
    for disk_op, disk_dict in self.op.disks:
9173
      utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
9174
      if disk_op == constants.DDM_REMOVE:
9175
        disk_addremove += 1
9176
        continue
9177
      elif disk_op == constants.DDM_ADD:
9178
        disk_addremove += 1
9179
      else:
9180
        if not isinstance(disk_op, int):
9181
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
9182
        if not isinstance(disk_dict, dict):
9183
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
9184
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
9185

    
9186
      if disk_op == constants.DDM_ADD:
9187
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
9188
        if mode not in constants.DISK_ACCESS_SET:
9189
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
9190
                                     errors.ECODE_INVAL)
9191
        size = disk_dict.get('size', None)
9192
        if size is None:
9193
          raise errors.OpPrereqError("Required disk parameter size missing",
9194
                                     errors.ECODE_INVAL)
9195
        try:
9196
          size = int(size)
9197
        except (TypeError, ValueError), err:
9198
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
9199
                                     str(err), errors.ECODE_INVAL)
9200
        disk_dict['size'] = size
9201
      else:
9202
        # modification of disk
9203
        if 'size' in disk_dict:
9204
          raise errors.OpPrereqError("Disk size change not possible, use"
9205
                                     " grow-disk", errors.ECODE_INVAL)
9206

    
9207
    if disk_addremove > 1:
9208
      raise errors.OpPrereqError("Only one disk add or remove operation"
9209
                                 " supported at a time", errors.ECODE_INVAL)
9210

    
9211
    if self.op.disks and self.op.disk_template is not None:
9212
      raise errors.OpPrereqError("Disk template conversion and other disk"
9213
                                 " changes not supported at the same time",
9214
                                 errors.ECODE_INVAL)
9215

    
9216
    if (self.op.disk_template and
9217
        self.op.disk_template in constants.DTS_NET_MIRROR and
9218
        self.op.remote_node is None):
9219
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
9220
                                 " one requires specifying a secondary node",
9221
                                 errors.ECODE_INVAL)
9222

    
9223
    # NIC validation
9224
    nic_addremove = 0
9225
    for nic_op, nic_dict in self.op.nics:
9226
      utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
9227
      if nic_op == constants.DDM_REMOVE:
9228
        nic_addremove += 1
9229
        continue
9230
      elif nic_op == constants.DDM_ADD:
9231
        nic_addremove += 1
9232
      else:
9233
        if not isinstance(nic_op, int):
9234
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
9235
        if not isinstance(nic_dict, dict):
9236
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
9237
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
9238

    
9239
      # nic_dict should be a dict
9240
      nic_ip = nic_dict.get('ip', None)
9241
      if nic_ip is not None:
9242
        if nic_ip.lower() == constants.VALUE_NONE:
9243
          nic_dict['ip'] = None
9244
        else:
9245
          if not netutils.IPAddress.IsValid(nic_ip):
9246
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
9247
                                       errors.ECODE_INVAL)
9248

    
9249
      nic_bridge = nic_dict.get('bridge', None)
9250
      nic_link = nic_dict.get('link', None)
9251
      if nic_bridge and nic_link:
9252
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
9253
                                   " at the same time", errors.ECODE_INVAL)
9254
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
9255
        nic_dict['bridge'] = None
9256
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
9257
        nic_dict['link'] = None
9258

    
9259
      if nic_op == constants.DDM_ADD:
9260
        nic_mac = nic_dict.get('mac', None)
9261
        if nic_mac is None:
9262
          nic_dict['mac'] = constants.VALUE_AUTO
9263

    
9264
      if 'mac' in nic_dict:
9265
        nic_mac = nic_dict['mac']
9266
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9267
          nic_mac = utils.NormalizeAndValidateMac(nic_mac)
9268

    
9269
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
9270
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
9271
                                     " modifying an existing nic",
9272
                                     errors.ECODE_INVAL)
9273

    
9274
    if nic_addremove > 1:
9275
      raise errors.OpPrereqError("Only one NIC add or remove operation"
9276
                                 " supported at a time", errors.ECODE_INVAL)
9277

    
9278
  def ExpandNames(self):
9279
    self._ExpandAndLockInstance()
9280
    self.needed_locks[locking.LEVEL_NODE] = []
9281
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
9282

    
9283
  def DeclareLocks(self, level):
9284
    if level == locking.LEVEL_NODE:
9285
      self._LockInstancesNodes()
9286
      if self.op.disk_template and self.op.remote_node:
9287
        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
9288
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
9289

    
9290
  def BuildHooksEnv(self):
9291
    """Build hooks env.
9292

9293
    This runs on the master, primary and secondaries.
9294

9295
    """
9296
    args = dict()
9297
    if constants.BE_MEMORY in self.be_new:
9298
      args['memory'] = self.be_new[constants.BE_MEMORY]
9299
    if constants.BE_VCPUS in self.be_new:
9300
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
9301
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
9302
    # information at all.
9303
    if self.op.nics:
9304
      args['nics'] = []
9305
      nic_override = dict(self.op.nics)
9306
      for idx, nic in enumerate(self.instance.nics):
9307
        if idx in nic_override:
9308
          this_nic_override = nic_override[idx]
9309
        else:
9310
          this_nic_override = {}
9311
        if 'ip' in this_nic_override:
9312
          ip = this_nic_override['ip']
9313
        else:
9314
          ip = nic.ip
9315
        if 'mac' in this_nic_override:
9316
          mac = this_nic_override['mac']
9317
        else:
9318
          mac = nic.mac
9319
        if idx in self.nic_pnew:
9320
          nicparams = self.nic_pnew[idx]
9321
        else:
9322
          nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
9323
        mode = nicparams[constants.NIC_MODE]
9324
        link = nicparams[constants.NIC_LINK]
9325
        args['nics'].append((ip, mac, mode, link))
9326
      if constants.DDM_ADD in nic_override:
9327
        ip = nic_override[constants.DDM_ADD].get('ip', None)
9328
        mac = nic_override[constants.DDM_ADD]['mac']
9329
        nicparams = self.nic_pnew[constants.DDM_ADD]
9330
        mode = nicparams[constants.NIC_MODE]
9331
        link = nicparams[constants.NIC_LINK]
9332
        args['nics'].append((ip, mac, mode, link))
9333
      elif constants.DDM_REMOVE in nic_override:
9334
        del args['nics'][-1]
9335

    
9336
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
9337
    if self.op.disk_template:
9338
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
9339
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
9340
    return env, nl, nl
9341

    
9342
  def CheckPrereq(self):
9343
    """Check prerequisites.
9344

9345
    This only checks the instance list against the existing names.
9346

9347
    """
9348
    # checking the new params on the primary/secondary nodes
9349

    
9350
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
9351
    cluster = self.cluster = self.cfg.GetClusterInfo()
9352
    assert self.instance is not None, \
9353
      "Cannot retrieve locked instance %s" % self.op.instance_name
9354
    pnode = instance.primary_node
9355
    nodelist = list(instance.all_nodes)
9356

    
9357
    # OS change
9358
    if self.op.os_name and not self.op.force:
9359
      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
9360
                      self.op.force_variant)
9361
      instance_os = self.op.os_name
9362
    else:
9363
      instance_os = instance.os
9364

    
9365
    if self.op.disk_template:
9366
      if instance.disk_template == self.op.disk_template:
9367
        raise errors.OpPrereqError("Instance already has disk template %s" %
9368
                                   instance.disk_template, errors.ECODE_INVAL)
9369

    
9370
      if (instance.disk_template,
9371
          self.op.disk_template) not in self._DISK_CONVERSIONS:
9372
        raise errors.OpPrereqError("Unsupported disk template conversion from"
9373
                                   " %s to %s" % (instance.disk_template,
9374
                                                  self.op.disk_template),
9375
                                   errors.ECODE_INVAL)
9376
      _CheckInstanceDown(self, instance, "cannot change disk template")
9377
      if self.op.disk_template in constants.DTS_NET_MIRROR:
9378
        if self.op.remote_node == pnode:
9379
          raise errors.OpPrereqError("Given new secondary node %s is the same"
9380
                                     " as the primary node of the instance" %
9381
                                     self.op.remote_node, errors.ECODE_STATE)
9382
        _CheckNodeOnline(self, self.op.remote_node)
9383
        _CheckNodeNotDrained(self, self.op.remote_node)
9384
        # FIXME: here we assume that the old instance type is DT_PLAIN
9385
        assert instance.disk_template == constants.DT_PLAIN
9386
        disks = [{"size": d.size, "vg": d.logical_id[0]}
9387
                 for d in instance.disks]
9388
        required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
9389
        _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
9390

    
9391
    # hvparams processing
9392
    if self.op.hvparams:
9393
      hv_type = instance.hypervisor
9394
      i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
9395
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
9396
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
9397

    
9398
      # local check
9399
      hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
9400
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
9401
      self.hv_new = hv_new # the new actual values
9402
      self.hv_inst = i_hvdict # the new dict (without defaults)
9403
    else:
9404
      self.hv_new = self.hv_inst = {}
9405

    
9406
    # beparams processing
9407
    if self.op.beparams:
9408
      i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
9409
                                   use_none=True)
9410
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
9411
      be_new = cluster.SimpleFillBE(i_bedict)
9412
      self.be_new = be_new # the new actual values
9413
      self.be_inst = i_bedict # the new dict (without defaults)
9414
    else:
9415
      self.be_new = self.be_inst = {}
9416
    be_old = cluster.FillBE(instance)
9417

    
9418
    # osparams processing
9419
    if self.op.osparams:
9420
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
9421
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
9422
      self.os_inst = i_osdict # the new dict (without defaults)
9423
    else:
9424
      self.os_inst = {}
9425

    
9426
    self.warn = []
9427

    
9428
    if (constants.BE_MEMORY in self.op.beparams and not self.op.force and
9429
        be_new[constants.BE_MEMORY] > be_old[constants.BE_MEMORY]):
9430
      mem_check_list = [pnode]
9431
      if be_new[constants.BE_AUTO_BALANCE]:
9432
        # either we changed auto_balance to yes or it was from before
9433
        mem_check_list.extend(instance.secondary_nodes)
9434
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
9435
                                                  instance.hypervisor)
9436
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
9437
                                         instance.hypervisor)
9438
      pninfo = nodeinfo[pnode]
9439
      msg = pninfo.fail_msg
9440
      if msg:
9441
        # Assume the primary node is unreachable and go ahead
9442
        self.warn.append("Can't get info from primary node %s: %s" %
9443
                         (pnode,  msg))
9444
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
9445
        self.warn.append("Node data from primary node %s doesn't contain"
9446
                         " free memory information" % pnode)
9447
      elif instance_info.fail_msg:
9448
        self.warn.append("Can't get instance runtime information: %s" %
9449
                        instance_info.fail_msg)
9450
      else:
9451
        if instance_info.payload:
9452
          current_mem = int(instance_info.payload['memory'])
9453
        else:
9454
          # Assume instance not running
9455
          # (there is a slight race condition here, but it's not very probable,
9456
          # and we have no other way to check)
9457
          current_mem = 0
9458
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
9459
                    pninfo.payload['memory_free'])
9460
        if miss_mem > 0:
9461
          raise errors.OpPrereqError("This change will prevent the instance"
9462
                                     " from starting, due to %d MB of memory"
9463
                                     " missing on its primary node" % miss_mem,
9464
                                     errors.ECODE_NORES)
9465

    
9466
      if be_new[constants.BE_AUTO_BALANCE]:
9467
        for node, nres in nodeinfo.items():
9468
          if node not in instance.secondary_nodes:
9469
            continue
9470
          nres.Raise("Can't get info from secondary node %s" % node,
9471
                     prereq=True, ecode=errors.ECODE_STATE)
9472
          if not isinstance(nres.payload.get('memory_free', None), int):
9473
            raise errors.OpPrereqError("Secondary node %s didn't return free"
9474
                                       " memory information" % node,
9475
                                       errors.ECODE_STATE)
9476
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
9477
            raise errors.OpPrereqError("This change will prevent the instance"
9478
                                       " from failover to its secondary node"
9479
                                       " %s, due to not enough memory" % node,
9480
                                       errors.ECODE_STATE)
9481

    
9482
    # NIC processing
9483
    self.nic_pnew = {}
9484
    self.nic_pinst = {}
9485
    for nic_op, nic_dict in self.op.nics:
9486
      if nic_op == constants.DDM_REMOVE:
9487
        if not instance.nics:
9488
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
9489
                                     errors.ECODE_INVAL)
9490
        continue
9491
      if nic_op != constants.DDM_ADD:
9492
        # an existing nic
9493
        if not instance.nics:
9494
          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
9495
                                     " no NICs" % nic_op,
9496
                                     errors.ECODE_INVAL)
9497
        if nic_op < 0 or nic_op >= len(instance.nics):
9498
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
9499
                                     " are 0 to %d" %
9500
                                     (nic_op, len(instance.nics) - 1),
9501
                                     errors.ECODE_INVAL)
9502
        old_nic_params = instance.nics[nic_op].nicparams
9503
        old_nic_ip = instance.nics[nic_op].ip
9504
      else:
9505
        old_nic_params = {}
9506
        old_nic_ip = None
9507

    
9508
      update_params_dict = dict([(key, nic_dict[key])
9509
                                 for key in constants.NICS_PARAMETERS
9510
                                 if key in nic_dict])
9511

    
9512
      if 'bridge' in nic_dict:
9513
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
9514

    
9515
      new_nic_params = _GetUpdatedParams(old_nic_params,
9516
                                         update_params_dict)
9517
      utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
9518
      new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
9519
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
9520
      self.nic_pinst[nic_op] = new_nic_params
9521
      self.nic_pnew[nic_op] = new_filled_nic_params
9522
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
9523

    
9524
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
9525
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
9526
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
9527
        if msg:
9528
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
9529
          if self.op.force:
9530
            self.warn.append(msg)
9531
          else:
9532
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
9533
      if new_nic_mode == constants.NIC_MODE_ROUTED:
9534
        if 'ip' in nic_dict:
9535
          nic_ip = nic_dict['ip']
9536
        else:
9537
          nic_ip = old_nic_ip
9538
        if nic_ip is None:
9539
          raise errors.OpPrereqError('Cannot set the nic ip to None'
9540
                                     ' on a routed nic', errors.ECODE_INVAL)
9541
      if 'mac' in nic_dict:
9542
        nic_mac = nic_dict['mac']
9543
        if nic_mac is None:
9544
          raise errors.OpPrereqError('Cannot set the nic mac to None',
9545
                                     errors.ECODE_INVAL)
9546
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9547
          # otherwise generate the mac
9548
          nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
9549
        else:
9550
          # or validate/reserve the current one
9551
          try:
9552
            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
9553
          except errors.ReservationError:
9554
            raise errors.OpPrereqError("MAC address %s already in use"
9555
                                       " in cluster" % nic_mac,
9556
                                       errors.ECODE_NOTUNIQUE)
9557

    
9558
    # DISK processing
9559
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
9560
      raise errors.OpPrereqError("Disk operations not supported for"
9561
                                 " diskless instances",
9562
                                 errors.ECODE_INVAL)
9563
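    # disk_op is DDM_ADD, DDM_REMOVE or an index into the existing disks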
    for disk_op, _ in self.op.disks:
9564
      if disk_op == constants.DDM_REMOVE:
9565
        if len(instance.disks) == 1:
9566
          raise errors.OpPrereqError("Cannot remove the last disk of"
9567
                                     " an instance", errors.ECODE_INVAL)
9568
        _CheckInstanceDown(self, instance, "cannot remove disks")
9569

    
9570
      if (disk_op == constants.DDM_ADD and
9571
          len(instance.disks) >= constants.MAX_DISKS):
9572
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
9573
                                   " add more" % constants.MAX_DISKS,
9574
                                   errors.ECODE_STATE)
9575
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
9576
        # an existing disk
9577
        if disk_op < 0 or disk_op >= len(instance.disks):
9578
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
9579
                                     " are 0 to %d" %
9580
                                     (disk_op, len(instance.disks) - 1),
9581
                                     errors.ECODE_INVAL)
9582

    
9583
    return
9584

    
9585
  def _ConvertPlainToDrbd(self, feedback_fn):
9586
    """Converts an instance from plain to drbd.
9587

9588
    """
9589
    feedback_fn("Converting template to drbd")
9590
    instance = self.instance
9591
    pnode = instance.primary_node
9592
    snode = self.op.remote_node
9593

    
9594
    # create a fake disk info for _GenerateDiskTemplate
9595
    disk_info = [{"size": d.size, "mode": d.mode,
9596
                  "vg": d.logical_id[0]} for d in instance.disks]
9597
    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
9598
                                      instance.name, pnode, [snode],
9599
                                      disk_info, None, None, 0, feedback_fn)
9600
    info = _GetInstanceInfoText(instance)
9601
    feedback_fn("Creating aditional volumes...")
9602
    # first, create the missing data and meta devices
9603
    for disk in new_disks:
9604
      # unfortunately this is... not too nice
9605
      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
9606
                            info, True)
9607
      for child in disk.children:
9608
        _CreateSingleBlockDev(self, snode, instance, child, info, True)
9609
    # at this stage, all new LVs have been created, we can rename the
9610
    # old ones
9611
    feedback_fn("Renaming original volumes...")
9612
    rename_list = [(o, n.children[0].logical_id)
9613
                   for (o, n) in zip(instance.disks, new_disks)]
9614
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
9615
    result.Raise("Failed to rename original LVs")
9616

    
9617
    feedback_fn("Initializing DRBD devices...")
9618
    # all child devices are in place, we can now create the DRBD devices
9619
    for disk in new_disks:
9620
      for node in [pnode, snode]:
9621
        f_create = node == pnode
9622
        _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
9623

    
9624
    # at this point, the instance has been modified
9625
    instance.disk_template = constants.DT_DRBD8
9626
    instance.disks = new_disks
9627
    self.cfg.Update(instance, feedback_fn)
9628

    
9629
    # disks are created, waiting for sync
9630
    disk_abort = not _WaitForSync(self, instance,
9631
                                  oneshot=not self.op.wait_for_sync)
9632
    if disk_abort:
9633
      raise errors.OpExecError("There are some degraded disks for"
9634
                               " this instance, please cleanup manually")
9635

    
9636
  def _ConvertDrbdToPlain(self, feedback_fn):
9637
    """Converts an instance from drbd to plain.
9638

9639
    """
9640
    instance = self.instance
9641
    assert len(instance.secondary_nodes) == 1
9642
    pnode = instance.primary_node
9643
    snode = instance.secondary_nodes[0]
9644
    feedback_fn("Converting template to plain")
9645

    
9646
    old_disks = instance.disks
9647
    new_disks = [d.children[0] for d in old_disks]
9648

    
9649
    # copy over size and mode
9650
    for parent, child in zip(old_disks, new_disks):
9651
      child.size = parent.size
9652
      child.mode = parent.mode
9653

    
9654
    # update instance structure
9655
    instance.disks = new_disks
9656
    instance.disk_template = constants.DT_PLAIN
9657
    self.cfg.Update(instance, feedback_fn)
9658

    
9659
    feedback_fn("Removing volumes on the secondary node...")
9660
    for disk in old_disks:
9661
      self.cfg.SetDiskID(disk, snode)
9662
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
9663
      if msg:
9664
        self.LogWarning("Could not remove block device %s on node %s,"
9665
                        " continuing anyway: %s", disk.iv_name, snode, msg)
9666

    
9667
    feedback_fn("Removing unneeded volumes on the primary node...")
9668
    for idx, disk in enumerate(old_disks):
9669
      meta = disk.children[1]
9670
      self.cfg.SetDiskID(meta, pnode)
9671
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
9672
      if msg:
9673
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
9674
                        " continuing anyway: %s", idx, pnode, msg)
9675

    
9676
  def Exec(self, feedback_fn):
9677
    """Modifies an instance.
9678

9679
    All parameters take effect only at the next restart of the instance.
9680

9681
    """
9682
    # Process here the warnings from CheckPrereq, as we don't have a
9683
    # feedback_fn there.
9684
    for warn in self.warn:
9685
      feedback_fn("WARNING: %s" % warn)
9686

    
9687
    result = []
9688
    instance = self.instance
9689
    # disk changes
9690
    for disk_op, disk_dict in self.op.disks:
9691
      if disk_op == constants.DDM_REMOVE:
9692
        # remove the last disk
9693
        device = instance.disks.pop()
9694
        device_idx = len(instance.disks)
9695
        for node, disk in device.ComputeNodeTree(instance.primary_node):
9696
          self.cfg.SetDiskID(disk, node)
9697
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
9698
          if msg:
9699
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
9700
                            " continuing anyway", device_idx, node, msg)
9701
        result.append(("disk/%d" % device_idx, "remove"))
9702
      elif disk_op == constants.DDM_ADD:
9703
        # add a new disk
9704
        if instance.disk_template == constants.DT_FILE:
9705
          file_driver, file_path = instance.disks[0].logical_id
9706
          file_path = os.path.dirname(file_path)
9707
        else:
9708
          file_driver = file_path = None
9709
        disk_idx_base = len(instance.disks)
9710
        new_disk = _GenerateDiskTemplate(self,
9711
                                         instance.disk_template,
9712
                                         instance.name, instance.primary_node,
9713
                                         instance.secondary_nodes,
9714
                                         [disk_dict],
9715
                                         file_path,
9716
                                         file_driver,
9717
                                         disk_idx_base, feedback_fn)[0]
9718
        instance.disks.append(new_disk)
9719
        info = _GetInstanceInfoText(instance)
9720

    
9721
        logging.info("Creating volume %s for instance %s",
9722
                     new_disk.iv_name, instance.name)
9723
        # Note: this needs to be kept in sync with _CreateDisks
9724
        #HARDCODE
9725
        for node in instance.all_nodes:
9726
          f_create = node == instance.primary_node
9727
          try:
9728
            _CreateBlockDev(self, node, instance, new_disk,
9729
                            f_create, info, f_create)
9730
          except errors.OpExecError, err:
9731
            self.LogWarning("Failed to create volume %s (%s) on"
9732
                            " node %s: %s",
9733
                            new_disk.iv_name, new_disk, node, err)
9734
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
9735
                       (new_disk.size, new_disk.mode)))
9736
      else:
9737
        # change a given disk
9738
        instance.disks[disk_op].mode = disk_dict['mode']
9739
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
9740

    
9741
    if self.op.disk_template:
9742
      r_shut = _ShutdownInstanceDisks(self, instance)
9743
      if not r_shut:
9744
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
9745
                                 " proceed with disk template conversion")
9746
      mode = (instance.disk_template, self.op.disk_template)
9747
      try:
9748
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
9749
      except:
9750
        self.cfg.ReleaseDRBDMinors(instance.name)
9751
        raise
9752
      result.append(("disk_template", self.op.disk_template))
9753

    
9754
    # NIC changes
9755
    for nic_op, nic_dict in self.op.nics:
9756
      if nic_op == constants.DDM_REMOVE:
9757
        # remove the last nic
9758
        del instance.nics[-1]
9759
        result.append(("nic.%d" % len(instance.nics), "remove"))
9760
      elif nic_op == constants.DDM_ADD:
9761
        # mac and bridge should be set, by now
9762
        mac = nic_dict['mac']
9763
        ip = nic_dict.get('ip', None)
9764
        nicparams = self.nic_pinst[constants.DDM_ADD]
9765
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
9766
        instance.nics.append(new_nic)
9767
        result.append(("nic.%d" % (len(instance.nics) - 1),
9768
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
9769
                       (new_nic.mac, new_nic.ip,
9770
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
9771
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
9772
                       )))
9773
      else:
9774
        for key in 'mac', 'ip':
9775
          if key in nic_dict:
9776
            setattr(instance.nics[nic_op], key, nic_dict[key])
9777
        if nic_op in self.nic_pinst:
9778
          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
9779
        for key, val in nic_dict.iteritems():
9780
          result.append(("nic.%s/%d" % (key, nic_op), val))
9781

    
9782
    # hvparams changes
9783
    if self.op.hvparams:
9784
      instance.hvparams = self.hv_inst
9785
      for key, val in self.op.hvparams.iteritems():
9786
        result.append(("hv/%s" % key, val))
9787

    
9788
    # beparams changes
9789
    if self.op.beparams:
9790
      instance.beparams = self.be_inst
9791
      for key, val in self.op.beparams.iteritems():
9792
        result.append(("be/%s" % key, val))
9793

    
9794
    # OS change
9795
    if self.op.os_name:
9796
      instance.os = self.op.os_name
9797

    
9798
    # osparams changes
9799
    if self.op.osparams:
9800
      instance.osparams = self.os_inst
9801
      for key, val in self.op.osparams.iteritems():
9802
        result.append(("os/%s" % key, val))
9803

    
9804
    self.cfg.Update(instance, feedback_fn)
9805

    
9806
    return result
9807

    
9808
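  # maps (current template, requested template) to the conversion method above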
  _DISK_CONVERSIONS = {
9809
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
9810
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
9811
    }


class LUBackupQuery(NoHooksLU):
9815
  """Query the exports list
9816

9817
  """
9818
  REQ_BGL = False
9819

    
9820
  def ExpandNames(self):
9821
    self.needed_locks = {}
9822
    self.share_locks[locking.LEVEL_NODE] = 1
9823
    if not self.op.nodes:
9824
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9825
    else:
9826
      self.needed_locks[locking.LEVEL_NODE] = \
9827
        _GetWantedNodes(self, self.op.nodes)
9828

    
9829
  def Exec(self, feedback_fn):
9830
    """Compute the list of all the exported system images.
9831

9832
    @rtype: dict
9833
    @return: a dictionary with the structure node->(export-list)
9834
        where export-list is a list of the instances exported on
9835
        that node.
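        Nodes whose export list could not be retrieved are mapped to
        False instead of a list.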
9836

9837
    """
9838
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
9839
    rpcresult = self.rpc.call_export_list(self.nodes)
9840
    result = {}
9841
    for node in rpcresult:
9842
      if rpcresult[node].fail_msg:
9843
        result[node] = False
9844
      else:
9845
        result[node] = rpcresult[node].payload
9846

    
9847
    return result


class LUBackupPrepare(NoHooksLU):
9851
  """Prepares an instance for an export and returns useful information.
9852

9853
  """
9854
  REQ_BGL = False
9855

    
9856
  def ExpandNames(self):
9857
    self._ExpandAndLockInstance()
9858

    
9859
  def CheckPrereq(self):
9860
    """Check prerequisites.
9861

9862
    """
9863
    instance_name = self.op.instance_name
9864

    
9865
    self.instance = self.cfg.GetInstanceInfo(instance_name)
9866
    assert self.instance is not None, \
9867
          "Cannot retrieve locked instance %s" % self.op.instance_name
9868
    _CheckNodeOnline(self, self.instance.primary_node)
9869

    
9870
    self._cds = _GetClusterDomainSecret()
9871

    
9872
  def Exec(self, feedback_fn):
9873
    """Prepares an instance for an export.
9874

9875
    """
9876
    instance = self.instance
9877

    
9878
    if self.op.mode == constants.EXPORT_MODE_REMOTE:
9879
      salt = utils.GenerateSecret(8)
9880

    
9881
      feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
9882
      result = self.rpc.call_x509_cert_create(instance.primary_node,
9883
                                              constants.RIE_CERT_VALIDITY)
9884
      result.Raise("Can't create X509 key and certificate on %s" % result.node)
9885

    
9886
      (name, cert_pem) = result.payload
9887

    
9888
      cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
9889
                                             cert_pem)
9890

    
9891
      return {
9892
        "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
9893
        "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
9894
                          salt),
9895
        "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
9896
        }
9897

    
9898
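    # local exports need no preparation data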
    return None


class LUBackupExport(LogicalUnit):
9902
  """Export an instance to an image in the cluster.
9903

9904
  """
9905
  HPATH = "instance-export"
9906
  HTYPE = constants.HTYPE_INSTANCE
9907
  REQ_BGL = False
9908

    
9909
  def CheckArguments(self):
9910
    """Check the arguments.
9911

9912
    """
9913
    self.x509_key_name = self.op.x509_key_name
9914
    self.dest_x509_ca_pem = self.op.destination_x509_ca
9915

    
9916
    if self.op.mode == constants.EXPORT_MODE_REMOTE:
9917
      if not self.x509_key_name:
9918
        raise errors.OpPrereqError("Missing X509 key name for encryption",
9919
                                   errors.ECODE_INVAL)
9920

    
9921
      if not self.dest_x509_ca_pem:
9922
        raise errors.OpPrereqError("Missing destination X509 CA",
9923
                                   errors.ECODE_INVAL)
9924

    
9925
  def ExpandNames(self):
9926
    self._ExpandAndLockInstance()
9927

    
9928
    # Lock all nodes for local exports
9929
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
9930
      # FIXME: lock only instance primary and destination node
9931
      #
9932
      # Sad but true, for now we have to lock all nodes, as we don't know where
9933
      # the previous export might be, and in this LU we search for it and
9934
      # remove it from its current node. In the future we could fix this by:
9935
      #  - making a tasklet to search (share-lock all), then create the
9936
      #    new one, then one to remove, after
9937
      #  - removing the removal operation altogether
9938
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9939

    
9940
  def DeclareLocks(self, level):
9941
    """Last minute lock declaration."""
9942
    # All nodes are locked anyway, so nothing to do here.
9943

    
9944
  def BuildHooksEnv(self):
9945
    """Build hooks env.
9946

9947
    This will run on the master, primary node and target node.
9948

9949
    """
9950
    env = {
9951
      "EXPORT_MODE": self.op.mode,
9952
      "EXPORT_NODE": self.op.target_node,
9953
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
9954
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
9955
      # TODO: Generic function for boolean env variables
9956
      "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
9957
      }
9958

    
9959
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
9960

    
9961
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
9962

    
9963
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
9964
      nl.append(self.op.target_node)
9965

    
9966
    return env, nl, nl
9967

    
9968
  def CheckPrereq(self):
9969
    """Check prerequisites.
9970

9971
    This checks that the instance and node names are valid.
9972

9973
    """
9974
    instance_name = self.op.instance_name
9975

    
9976
    self.instance = self.cfg.GetInstanceInfo(instance_name)
9977
    assert self.instance is not None, \
9978
          "Cannot retrieve locked instance %s" % self.op.instance_name
9979
    _CheckNodeOnline(self, self.instance.primary_node)
9980

    
9981
    if (self.op.remove_instance and self.instance.admin_up and
9982
        not self.op.shutdown):
9983
      raise errors.OpPrereqError("Cannot remove instance without shutting it"
                                 " down first")
9985

    
9986
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
9987
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
9988
      self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
9989
      assert self.dst_node is not None
9990

    
9991
      _CheckNodeOnline(self, self.dst_node.name)
9992
      _CheckNodeNotDrained(self, self.dst_node.name)
9993

    
9994
      self._cds = None
9995
      self.dest_disk_info = None
9996
      self.dest_x509_ca = None
9997

    
9998
    elif self.op.mode == constants.EXPORT_MODE_REMOTE:
9999
      self.dst_node = None
10000

    
10001
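      # in remote mode, target_node holds per-disk destination information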
      if len(self.op.target_node) != len(self.instance.disks):
10002
        raise errors.OpPrereqError(("Received destination information for %s"
10003
                                    " disks, but instance %s has %s disks") %
10004
                                   (len(self.op.target_node), instance_name,
10005
                                    len(self.instance.disks)),
10006
                                   errors.ECODE_INVAL)
10007

    
10008
      cds = _GetClusterDomainSecret()
10009

    
10010
      # Check X509 key name
10011
      try:
10012
        (key_name, hmac_digest, hmac_salt) = self.x509_key_name
10013
      except (TypeError, ValueError), err:
10014
        raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)
10015

    
10016
      if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
10017
        raise errors.OpPrereqError("HMAC for X509 key name is wrong",
10018
                                   errors.ECODE_INVAL)
10019

    
10020
      # Load and verify CA
10021
      try:
10022
        (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
10023
      except OpenSSL.crypto.Error, err:
10024
        raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
10025
                                   (err, ), errors.ECODE_INVAL)
10026

    
10027
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
10028
      if errcode is not None:
10029
        raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
10030
                                   (msg, ), errors.ECODE_INVAL)
10031

    
10032
      self.dest_x509_ca = cert
10033

    
10034
      # Verify target information
10035
      disk_info = []
10036
      for idx, disk_data in enumerate(self.op.target_node):
10037
        try:
10038
          (host, port, magic) = \
10039
            masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
10040
        except errors.GenericError, err:
10041
          raise errors.OpPrereqError("Target info for disk %s: %s" %
10042
                                     (idx, err), errors.ECODE_INVAL)
10043

    
10044
        disk_info.append((host, port, magic))
10045

    
10046
      assert len(disk_info) == len(self.op.target_node)
10047
      self.dest_disk_info = disk_info
10048

    
10049
    else:
10050
      raise errors.ProgrammerError("Unhandled export mode %r" %
10051
                                   self.op.mode)
10052

    
10053
    # instance disk type verification
10054
    # TODO: Implement export support for file-based disks
10055
    for disk in self.instance.disks:
10056
      if disk.dev_type == constants.LD_FILE:
10057
        raise errors.OpPrereqError("Export not supported for instances with"
10058
                                   " file-based disks", errors.ECODE_INVAL)
10059

    
10060
  def _CleanupExports(self, feedback_fn):
10061
    """Removes exports of current instance from all other nodes.
10062

10063
    If an instance in a cluster with nodes A..D was exported to node C, its
10064
    exports will be removed from the nodes A, B and D.
10065

10066
    """
10067
    assert self.op.mode != constants.EXPORT_MODE_REMOTE
10068

    
10069
    nodelist = self.cfg.GetNodeList()
10070
    nodelist.remove(self.dst_node.name)
10071

    
10072
    # On one-node clusters nodelist will be empty after the removal; if we
    # proceeded, the backup would be removed because OpBackupQuery
    # substitutes an empty list with the full cluster node list.
10075
    iname = self.instance.name
10076
    if nodelist:
10077
      feedback_fn("Removing old exports for instance %s" % iname)
10078
      exportlist = self.rpc.call_export_list(nodelist)
10079
      for node in exportlist:
10080
        if exportlist[node].fail_msg:
10081
          continue
10082
        if iname in exportlist[node].payload:
10083
          msg = self.rpc.call_export_remove(node, iname).fail_msg
10084
          if msg:
10085
            self.LogWarning("Could not remove older export for instance %s"
10086
                            " on node %s: %s", iname, node, msg)
10087

    
10088
  def Exec(self, feedback_fn):
10089
    """Export an instance to an image in the cluster.
10090

10091
    """
10092
    assert self.op.mode in constants.EXPORT_MODES
10093

    
10094
    instance = self.instance
10095
    src_node = instance.primary_node
10096

    
10097
    if self.op.shutdown:
10098
      # shutdown the instance, but not the disks
10099
      feedback_fn("Shutting down instance %s" % instance.name)
10100
      result = self.rpc.call_instance_shutdown(src_node, instance,
10101
                                               self.op.shutdown_timeout)
10102
      # TODO: Maybe ignore failures if ignore_remove_failures is set
10103
      result.Raise("Could not shutdown instance %s on"
10104
                   " node %s" % (instance.name, src_node))
10105

    
10106
    # set the disks ID correctly since call_instance_start needs the
10107
    # correct drbd minor to create the symlinks
10108
    for disk in instance.disks:
10109
      self.cfg.SetDiskID(disk, src_node)
10110

    
10111
    activate_disks = (not instance.admin_up)
10112

    
10113
    if activate_disks:
10114
      # Activate the instance disks if we're exporting a stopped instance
10115
      feedback_fn("Activating disks for %s" % instance.name)
10116
      _StartInstanceDisks(self, instance, None)
10117

    
10118
    try:
10119
      helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
10120
                                                     instance)
10121

    
10122
      helper.CreateSnapshots()
10123
      try:
10124
        if (self.op.shutdown and instance.admin_up and
10125
            not self.op.remove_instance):
10126
          assert not activate_disks
10127
          feedback_fn("Starting instance %s" % instance.name)
10128
          result = self.rpc.call_instance_start(src_node, instance, None, None)
10129
          msg = result.fail_msg
10130
          if msg:
10131
            feedback_fn("Failed to start instance: %s" % msg)
10132
            _ShutdownInstanceDisks(self, instance)
10133
            raise errors.OpExecError("Could not start instance: %s" % msg)
10134

    
10135
        if self.op.mode == constants.EXPORT_MODE_LOCAL:
10136
          (fin_resu, dresults) = helper.LocalExport(self.dst_node)
10137
        elif self.op.mode == constants.EXPORT_MODE_REMOTE:
10138
          connect_timeout = constants.RIE_CONNECT_TIMEOUT
10139
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
10140

    
10141
          (key_name, _, _) = self.x509_key_name
10142

    
10143
          dest_ca_pem = \
10144
            OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
10145
                                            self.dest_x509_ca)
10146

    
10147
          (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
10148
                                                     key_name, dest_ca_pem,
10149
                                                     timeouts)
10150
      finally:
10151
        helper.Cleanup()
10152

    
10153
      # Check for backwards compatibility
10154
      assert len(dresults) == len(instance.disks)
10155
      assert compat.all(isinstance(i, bool) for i in dresults), \
10156
             "Not all results are boolean: %r" % dresults
10157

    
10158
    finally:
10159
      if activate_disks:
10160
        feedback_fn("Deactivating disks for %s" % instance.name)
10161
        _ShutdownInstanceDisks(self, instance)
10162

    
10163
    if not (compat.all(dresults) and fin_resu):
10164
      failures = []
10165
      if not fin_resu:
10166
        failures.append("export finalization")
10167
      if not compat.all(dresults):
10168
        fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
10169
                               if not dsk)
10170
        failures.append("disk export: disk(s) %s" % fdsk)
10171

    
10172
      raise errors.OpExecError("Export failed, errors in %s" %
10173
                               utils.CommaJoin(failures))
10174

    
10175
    # At this point, the export was successful, we can cleanup/finish
10176

    
10177
    # Remove instance if requested
10178
    if self.op.remove_instance:
10179
      feedback_fn("Removing instance %s" % instance.name)
10180
      _RemoveInstance(self, feedback_fn, instance,
10181
                      self.op.ignore_remove_failures)
10182

    
10183
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
10184
      self._CleanupExports(feedback_fn)
10185

    
10186
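    # return the finalization status and the per-disk export results,
    # e.g. (True, [True, True]) for a successful two-disk export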
    return fin_resu, dresults


class LUBackupRemove(NoHooksLU):
10190
  """Remove exports related to the named instance.
10191

10192
  """
10193
  REQ_BGL = False
10194

    
10195
  def ExpandNames(self):
10196
    self.needed_locks = {}
10197
    # We need all nodes to be locked in order for RemoveExport to work, but we
10198
    # don't need to lock the instance itself, as nothing will happen to it (and
10199
    # we can remove exports also for a removed instance)
10200
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
10201

    
10202
  def Exec(self, feedback_fn):
10203
    """Remove any export.
10204

10205
    """
10206
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
10207
    # If the instance was not found we'll try with the name that was passed in.
10208
    # This will only work if it was an FQDN, though.
10209
    fqdn_warn = False
10210
    if not instance_name:
10211
      fqdn_warn = True
10212
      instance_name = self.op.instance_name
10213

    
10214
    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
10215
    exportlist = self.rpc.call_export_list(locked_nodes)
10216
    found = False
10217
    for node in exportlist:
10218
      msg = exportlist[node].fail_msg
10219
      if msg:
10220
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
10221
        continue
10222
      if instance_name in exportlist[node].payload:
10223
        found = True
10224
        result = self.rpc.call_export_remove(node, instance_name)
10225
        msg = result.fail_msg
10226
        if msg:
10227
          logging.error("Could not remove export for instance %s"
10228
                        " on node %s: %s", instance_name, node, msg)
10229

    
10230
    if fqdn_warn and not found:
10231
      feedback_fn("Export not found. If trying to remove an export belonging"
10232
                  " to a deleted instance please use its Fully Qualified"
10233
                  " Domain Name.")
10234

    
10235

    
10236
class LUGroupAdd(LogicalUnit):
10237
  """Logical unit for creating node groups.
10238

10239
  """
10240
  HPATH = "group-add"
10241
  HTYPE = constants.HTYPE_GROUP
10242
  REQ_BGL = False
10243

    
10244
  def ExpandNames(self):
10245
    # We need the new group's UUID here so that we can create and acquire the
10246
    # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
10247
    # that it should not check whether the UUID exists in the configuration.
10248
    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
10249
    self.needed_locks = {}
10250
    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
10251

    
10252
  def CheckPrereq(self):
10253
    """Check prerequisites.
10254

10255
    This checks that the given group name is not an existing node group
10256
    already.
10257

10258
    """
10259
    try:
10260
      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10261
    except errors.OpPrereqError:
10262
      pass
10263
    else:
10264
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
10265
                                 " node group (UUID: %s)" %
10266
                                 (self.op.group_name, existing_uuid),
10267
                                 errors.ECODE_EXISTS)
10268

    
10269
    if self.op.ndparams:
10270
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
10271

    
10272
  def BuildHooksEnv(self):
10273
    """Build hooks env.
10274

10275
    """
10276
    env = {
10277
      "GROUP_NAME": self.op.group_name,
10278
      }
10279
    mn = self.cfg.GetMasterNode()
10280
    return env, [mn], [mn]
10281

    
10282
  def Exec(self, feedback_fn):
10283
    """Add the node group to the cluster.
10284

10285
    """
10286
    group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
10287
                                  uuid=self.group_uuid,
10288
                                  alloc_policy=self.op.alloc_policy,
10289
                                  ndparams=self.op.ndparams)
10290

    
10291
    self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
10292
    del self.remove_locks[locking.LEVEL_NODEGROUP]


class LUGroupAssignNodes(NoHooksLU):
10296
  """Logical unit for assigning nodes to groups.
10297

10298
  """
10299
  REQ_BGL = False
10300

    
10301
  def ExpandNames(self):
10302
    # These raise errors.OpPrereqError on their own:
10303
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10304
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
10305

    
10306
    # We want to lock all the affected nodes and groups. We have readily
10307
    # available the list of nodes, and the *destination* group. To gather the
10308
    # list of "source" groups, we need to fetch node information later on.
10309
    self.needed_locks = {
10310
      locking.LEVEL_NODEGROUP: set([self.group_uuid]),
10311
      locking.LEVEL_NODE: self.op.nodes,
10312
      }
10313

    
10314
  def DeclareLocks(self, level):
10315
    if level == locking.LEVEL_NODEGROUP:
10316
      assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1
10317

    
10318
      # Try to get all affected nodes' groups without having the group or node
10319
      # lock yet. Needs verification later in the code flow.
10320
      groups = self.cfg.GetNodeGroupsFromNodes(self.op.nodes)
10321

    
10322
      self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
10323

    
10324
  def CheckPrereq(self):
10325
    """Check prerequisites.
10326

10327
    """
10328
    assert self.needed_locks[locking.LEVEL_NODEGROUP]
10329
    assert (frozenset(self.acquired_locks[locking.LEVEL_NODE]) ==
10330
            frozenset(self.op.nodes))
10331

    
10332
    expected_locks = (set([self.group_uuid]) |
10333
                      self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
10334
    actual_locks = self.acquired_locks[locking.LEVEL_NODEGROUP]
10335
    if actual_locks != expected_locks:
10336
      raise errors.OpExecError("Nodes changed groups since locks were acquired,"
10337
                               " current groups are '%s', used to be '%s'" %
10338
                               (utils.CommaJoin(expected_locks),
10339
                                utils.CommaJoin(actual_locks)))
10340

    
10341
    self.node_data = self.cfg.GetAllNodesInfo()
10342
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
10343
    instance_data = self.cfg.GetAllInstancesInfo()
10344

    
10345
    if self.group is None:
10346
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
10347
                               (self.op.group_name, self.group_uuid))
10348

    
10349
    (new_splits, previous_splits) = \
10350
      self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
10351
                                             for node in self.op.nodes],
10352
                                            self.node_data, instance_data)
10353

    
10354
    if new_splits:
10355
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
10356

    
10357
      if not self.op.force:
10358
        raise errors.OpExecError("The following instances get split by this"
10359
                                 " change and --force was not given: %s" %
10360
                                 fmt_new_splits)
10361
      else:
10362
        self.LogWarning("This operation will split the following instances: %s",
10363
                        fmt_new_splits)
10364

    
10365
        if previous_splits:
10366
          self.LogWarning("In addition, these already-split instances continue"
10367
                          " to be split across groups: %s",
10368
                          utils.CommaJoin(utils.NiceSort(previous_splits)))
10369

    
10370
  def Exec(self, feedback_fn):
10371
    """Assign nodes to a new group.
10372

10373
    """
10374
    for node in self.op.nodes:
10375
      self.node_data[node].group = self.group_uuid
10376

    
10377
    # FIXME: Depends on side-effects of modifying the result of
10378
    # C{cfg.GetAllNodesInfo}
10379

    
10380
    self.cfg.Update(self.group, feedback_fn) # Saves all modified nodes.
10381

    
10382
  @staticmethod
10383
  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
10384
    """Check for split instances after a node assignment.
10385

10386
    This method considers a series of node assignments as an atomic operation,
10387
    and returns information about split instances after applying the set of
10388
    changes.
10389

10390
    In particular, it returns information about newly split instances, and
10391
    instances that were already split, and remain so after the change.
10392

10393
    Only instances whose disk template is listed in constants.DTS_NET_MIRROR are
10394
    considered.
10395

10396
    @type changes: list of (node_name, new_group_uuid) pairs.
10397
    @param changes: list of node assignments to consider.
10398
    @param node_data: a dict with data for all nodes
10399
    @param instance_data: a dict with all instances to consider
10400
    @rtype: a two-tuple
10401
    @return: a list of instances that were previously okay and end up split as
      a consequence of this change, and a list of instances that were
      previously split and remain so after the change.
10404

10405
    """
10406
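    # only assignments that actually move a node to a different group matter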
    changed_nodes = dict((node, group) for node, group in changes
10407
                         if node_data[node].group != group)
10408

    
10409
    all_split_instances = set()
10410
    previously_split_instances = set()
10411

    
10412
    def InstanceNodes(instance):
10413
      return [instance.primary_node] + list(instance.secondary_nodes)
10414

    
10415
    for inst in instance_data.values():
10416
      if inst.disk_template not in constants.DTS_NET_MIRROR:
10417
        continue
10418

    
10419
      instance_nodes = InstanceNodes(inst)
10420

    
10421
      if len(set(node_data[node].group for node in instance_nodes)) > 1:
10422
        previously_split_instances.add(inst.name)
10423

    
10424
      if len(set(changed_nodes.get(node, node_data[node].group)
10425
                 for node in instance_nodes)) > 1:
10426
        all_split_instances.add(inst.name)
10427

    
10428
    return (list(all_split_instances - previously_split_instances),
            list(previously_split_instances & all_split_instances))


class _GroupQuery(_QueryBase):
10433

    
10434
  FIELDS = query.GROUP_FIELDS
10435

    
10436
  def ExpandNames(self, lu):
10437
    lu.needed_locks = {}
10438

    
10439
    self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
10440
    name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
10441

    
10442
    if not self.names:
10443
      self.wanted = [name_to_uuid[name]
10444
                     for name in utils.NiceSort(name_to_uuid.keys())]
10445
    else:
10446
      # Accept names to be either names or UUIDs.
10447
      missing = []
10448
      self.wanted = []
10449
      all_uuid = frozenset(self._all_groups.keys())
10450

    
10451
      for name in self.names:
10452
        if name in all_uuid:
10453
          self.wanted.append(name)
10454
        elif name in name_to_uuid:
10455
          self.wanted.append(name_to_uuid[name])
10456
        else:
10457
          missing.append(name)
10458

    
10459
      if missing:
10460
        raise errors.OpPrereqError("Some groups do not exist: %s" %
10461
                                   utils.CommaJoin(missing),
10462
                                   errors.ECODE_NOENT)
10463

    
10464
  def DeclareLocks(self, lu, level):
10465
    pass
10466

    
10467
  def _GetQueryData(self, lu):
10468
    """Computes the list of node groups and their attributes.
10469

10470
    """
10471
    do_nodes = query.GQ_NODE in self.requested_data
10472
    do_instances = query.GQ_INST in self.requested_data
10473

    
10474
    group_to_nodes = None
10475
    group_to_instances = None
10476

    
10477
    # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
10478
    # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
10479
    # latter GetAllInstancesInfo() is not enough, for we have to go through
10480
    # instance->node. Hence, we will need to process nodes even if we only need
10481
    # instance information.
10482
    if do_nodes or do_instances:
10483
      all_nodes = lu.cfg.GetAllNodesInfo()
10484
      group_to_nodes = dict((uuid, []) for uuid in self.wanted)
10485
      node_to_group = {}
10486

    
10487
      for node in all_nodes.values():
10488
        if node.group in group_to_nodes:
10489
          group_to_nodes[node.group].append(node.name)
10490
          node_to_group[node.name] = node.group
10491

    
10492
      if do_instances:
10493
        all_instances = lu.cfg.GetAllInstancesInfo()
10494
        group_to_instances = dict((uuid, []) for uuid in self.wanted)
10495

    
10496
        for instance in all_instances.values():
10497
          node = instance.primary_node
10498
          if node in node_to_group:
10499
            group_to_instances[node_to_group[node]].append(instance.name)
10500

    
10501
        if not do_nodes:
10502
          # Do not pass on node information if it was not requested.
10503
          group_to_nodes = None
10504

    
10505
    return query.GroupQueryData([self._all_groups[uuid]
10506
                                 for uuid in self.wanted],
10507
                                group_to_nodes, group_to_instances)


class LUGroupQuery(NoHooksLU):
10511
  """Logical unit for querying node groups.
10512

10513
  """
10514
  REQ_BGL = False
10515

    
10516
  def CheckArguments(self):
10517
    self.gq = _GroupQuery(self.op.names, self.op.output_fields, False)
10518

    
10519
  def ExpandNames(self):
10520
    self.gq.ExpandNames(self)
10521

    
10522
  def Exec(self, feedback_fn):
10523
    return self.gq.OldStyleQuery(self)


class LUGroupSetParams(LogicalUnit):
10527
  """Modifies the parameters of a node group.
10528

10529
  """
10530
  HPATH = "group-modify"
10531
  HTYPE = constants.HTYPE_GROUP
10532
  REQ_BGL = False
10533

    
10534
  def CheckArguments(self):
10535
    all_changes = [
10536
      self.op.ndparams,
10537
      self.op.alloc_policy,
10538
      ]
10539

    
10540
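    # require at least one of the parameters to be specified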
    if all_changes.count(None) == len(all_changes):
10541
      raise errors.OpPrereqError("Please pass at least one modification",
10542
                                 errors.ECODE_INVAL)
10543

    
10544
  def ExpandNames(self):
10545
    # This raises errors.OpPrereqError on its own:
10546
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10547

    
10548
    self.needed_locks = {
10549
      locking.LEVEL_NODEGROUP: [self.group_uuid],
10550
      }
10551

    
10552
  def CheckPrereq(self):
10553
    """Check prerequisites.
10554

10555
    """
10556
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
10557

    
10558
    if self.group is None:
10559
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
10560
                               (self.op.group_name, self.group_uuid))
10561

    
10562
    if self.op.ndparams:
10563
      new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
10564
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
10565
      self.new_ndparams = new_ndparams
10566

    
10567
  def BuildHooksEnv(self):
10568
    """Build hooks env.
10569

10570
    """
10571
    env = {
10572
      "GROUP_NAME": self.op.group_name,
10573
      "NEW_ALLOC_POLICY": self.op.alloc_policy,
10574
      }
10575
    mn = self.cfg.GetMasterNode()
10576
    return env, [mn], [mn]
10577

    
10578
  def Exec(self, feedback_fn):
10579
    """Modifies the node group.
10580

10581
    """
10582
    result = []
10583

    
10584
    if self.op.ndparams:
10585
      self.group.ndparams = self.new_ndparams
10586
      result.append(("ndparams", str(self.group.ndparams)))
10587

    
10588
    if self.op.alloc_policy:
10589
      self.group.alloc_policy = self.op.alloc_policy
10590

    
10591
    self.cfg.Update(self.group, feedback_fn)
10592
    return result



class LUGroupRemove(LogicalUnit):
10597
  HPATH = "group-remove"
10598
  HTYPE = constants.HTYPE_GROUP
10599
  REQ_BGL = False
10600

    
10601
  def ExpandNames(self):
10602
    # This raises errors.OpPrereqError on its own:
10603
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10604
    self.needed_locks = {
10605
      locking.LEVEL_NODEGROUP: [self.group_uuid],
10606
      }
10607

    
10608
  def CheckPrereq(self):
10609
    """Check prerequisites.
10610

10611
    This checks that the given group name exists as a node group, that it is
    empty (i.e., contains no nodes), and that it is not the last group of the
    cluster.
10614

10615
    """
10616
    # Verify that the group is empty.
10617
    group_nodes = [node.name
10618
                   for node in self.cfg.GetAllNodesInfo().values()
10619
                   if node.group == self.group_uuid]
10620

    
10621
    if group_nodes:
10622
      raise errors.OpPrereqError("Group '%s' not empty, has the following"
10623
                                 " nodes: %s" %
10624
                                 (self.op.group_name,
10625
                                  utils.CommaJoin(utils.NiceSort(group_nodes))),
10626
                                 errors.ECODE_STATE)
10627

    
10628
    # Verify the cluster would not be left group-less.
10629
    if len(self.cfg.GetNodeGroupList()) == 1:
10630
      raise errors.OpPrereqError("Group '%s' is the only group,"
10631
                                 " cannot be removed" %
10632
                                 self.op.group_name,
10633
                                 errors.ECODE_STATE)
10634

    
10635
  def BuildHooksEnv(self):
10636
    """Build hooks env.
10637

10638
    """
10639
    env = {
10640
      "GROUP_NAME": self.op.group_name,
10641
      }
10642
    mn = self.cfg.GetMasterNode()
10643
    return env, [mn], [mn]
10644

    
10645
  def Exec(self, feedback_fn):
10646
    """Remove the node group.
10647

10648
    """
10649
    try:
10650
      self.cfg.RemoveNodeGroup(self.group_uuid)
10651
    except errors.ConfigurationError:
10652
      raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
10653
                               (self.op.group_name, self.group_uuid))
10654

    
10655
    self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid


class LUGroupRename(LogicalUnit):
10659
  HPATH = "group-rename"
10660
  HTYPE = constants.HTYPE_GROUP
10661
  REQ_BGL = False
10662

    
10663
  def ExpandNames(self):
10664
    # This raises errors.OpPrereqError on its own:
10665
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.old_name)
10666

    
10667
    self.needed_locks = {
10668
      locking.LEVEL_NODEGROUP: [self.group_uuid],
10669
      }
10670

    
10671
  def CheckPrereq(self):
10672
    """Check prerequisites.
10673

10674
    This checks that the given old_name exists as a node group, and that
10675
    new_name doesn't.
10676

10677
    """
10678
    try:
10679
      new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
10680
    except errors.OpPrereqError:
10681
      pass
10682
    else:
10683
      raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
10684
                                 " node group (UUID: %s)" %
10685
                                 (self.op.new_name, new_name_uuid),
10686
                                 errors.ECODE_EXISTS)
10687

    
10688
  def BuildHooksEnv(self):
10689
    """Build hooks env.
10690

10691
    """
10692
    env = {
10693
      "OLD_NAME": self.op.old_name,
10694
      "NEW_NAME": self.op.new_name,
10695
      }
10696

    
10697
    mn = self.cfg.GetMasterNode()
10698
    all_nodes = self.cfg.GetAllNodesInfo()
10699
    run_nodes = [mn]
10700
    all_nodes.pop(mn, None)
10701

    
10702
    for node in all_nodes.values():
10703
      if node.group == self.group_uuid:
10704
        run_nodes.append(node.name)
10705

    
10706
    return env, run_nodes, run_nodes
10707

    
10708
  def Exec(self, feedback_fn):
10709
    """Rename the node group.
10710

10711
    """
10712
    group = self.cfg.GetNodeGroup(self.group_uuid)
10713

    
10714
    if group is None:
10715
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
10716
                               (self.op.old_name, self.group_uuid))
10717

    
10718
    group.name = self.op.new_name
10719
    self.cfg.Update(group, feedback_fn)
10720

    
10721
    return self.op.new_name


class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
10725
  """Generic tags LU.
10726

10727
  This is an abstract class which is the parent of all the other tags LUs.
10728

10729
  """
10730

    
10731
  def ExpandNames(self):
10732
    self.needed_locks = {}
10733
    if self.op.kind == constants.TAG_NODE:
10734
      self.op.name = _ExpandNodeName(self.cfg, self.op.name)
10735
      self.needed_locks[locking.LEVEL_NODE] = self.op.name
10736
    elif self.op.kind == constants.TAG_INSTANCE:
10737
      self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
10738
      self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
10739

    
10740
    # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
10741
    # not possible to acquire the BGL based on opcode parameters)
10742

    
10743
  def CheckPrereq(self):
10744
    """Check prerequisites.
10745

10746
    """
10747
    if self.op.kind == constants.TAG_CLUSTER:
10748
      self.target = self.cfg.GetClusterInfo()
10749
    elif self.op.kind == constants.TAG_NODE:
10750
      self.target = self.cfg.GetNodeInfo(self.op.name)
10751
    elif self.op.kind == constants.TAG_INSTANCE:
10752
      self.target = self.cfg.GetInstanceInfo(self.op.name)
10753
    else:
10754
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
10755
                                 str(self.op.kind), errors.ECODE_INVAL)


class LUTagsGet(TagsLU):
10759
  """Returns the tags of a given object.
10760

10761
  """
10762
  REQ_BGL = False
10763

    
10764
  def ExpandNames(self):
10765
    TagsLU.ExpandNames(self)
10766

    
10767
    # Share locks as this is only a read operation
10768
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
10769

    
10770
  def Exec(self, feedback_fn):
10771
    """Returns the tag list.
10772

10773
    """
10774
    return list(self.target.GetTags())


class LUTagsSearch(NoHooksLU):
10778
  """Searches the tags for a given pattern.
10779

10780
  """
10781
  REQ_BGL = False
10782

    
10783
  def ExpandNames(self):
10784
    self.needed_locks = {}
10785

    
10786
  def CheckPrereq(self):
10787
    """Check prerequisites.
10788

10789
    This checks the pattern passed for validity by compiling it.
10790

10791
    """
10792
    try:
10793
      self.re = re.compile(self.op.pattern)
10794
    except re.error, err:
10795
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
10796
                                 (self.op.pattern, err), errors.ECODE_INVAL)
10797

    
10798
  def Exec(self, feedback_fn):
10799
    """Returns the tag list.
10800

10801
    """
10802
    cfg = self.cfg
10803
    tgts = [("/cluster", cfg.GetClusterInfo())]
10804
    ilist = cfg.GetAllInstancesInfo().values()
10805
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
10806
    nlist = cfg.GetAllNodesInfo().values()
10807
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
10808
    results = []
10809
    for path, target in tgts:
10810
      for tag in target.GetTags():
10811
        if self.re.search(tag):
10812
          results.append((path, tag))
10813
    return results


class LUTagsSet(TagsLU):
10817
  """Sets a tag on a given object.
10818

10819
  """
10820
  REQ_BGL = False
10821

    
10822
  def CheckPrereq(self):
10823
    """Check prerequisites.
10824

10825
    This checks the type and length of the tag name and value.
10826

10827
    """
10828
    TagsLU.CheckPrereq(self)
10829
    for tag in self.op.tags:
10830
      objects.TaggableObject.ValidateTag(tag)
10831

    
10832
  def Exec(self, feedback_fn):
10833
    """Sets the tag.
10834

10835
    """
10836
    try:
10837
      for tag in self.op.tags:
10838
        self.target.AddTag(tag)
10839
    except errors.TagError, err:
10840
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
10841
    self.cfg.Update(self.target, feedback_fn)


class LUTagsDel(TagsLU):
10845
  """Delete a list of tags from a given object.
10846

10847
  """
10848
  REQ_BGL = False
10849

    
10850
  def CheckPrereq(self):
10851
    """Check prerequisites.
10852

10853
    This checks that we have the given tag.
10854

10855
    """
10856
    TagsLU.CheckPrereq(self)
10857
    for tag in self.op.tags:
10858
      objects.TaggableObject.ValidateTag(tag)
10859
    del_tags = frozenset(self.op.tags)
10860
    cur_tags = self.target.GetTags()
10861

    
10862
    diff_tags = del_tags - cur_tags
10863
    if diff_tags:
10864
      diff_names = ("'%s'" % i for i in sorted(diff_tags))
10865
      raise errors.OpPrereqError("Tag(s) %s not found" %
10866
                                 (utils.CommaJoin(diff_names), ),
10867
                                 errors.ECODE_NOENT)
10868

    
10869
  def Exec(self, feedback_fn):
10870
    """Remove the tag from the object.
10871

10872
    """
10873
    for tag in self.op.tags:
10874
      self.target.RemoveTag(tag)
10875
    self.cfg.Update(self.target, feedback_fn)


class LUTestDelay(NoHooksLU):
10879
  """Sleep for a specified amount of time.
10880

10881
  This LU sleeps on the master and/or nodes for a specified amount of
10882
  time.
10883

10884
  """
10885
  REQ_BGL = False
10886

    
10887
  def ExpandNames(self):
10888
    """Expand names and set required locks.
10889

10890
    This expands the node list, if any.
10891

10892
    """
10893
    self.needed_locks = {}
10894
    if self.op.on_nodes:
10895
      # _GetWantedNodes can be used here, but is not always appropriate to use
10896
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
10897
      # more information.
10898
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
10899
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
10900

    
10901
  def _TestDelay(self):
10902
    """Do the actual sleep.
10903

10904
    """
10905
    if self.op.on_master:
10906
      if not utils.TestDelay(self.op.duration):
10907
        raise errors.OpExecError("Error during master delay test")
10908
    if self.op.on_nodes:
10909
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
10910
      for node, node_result in result.items():
10911
        node_result.Raise("Failure during rpc call to node %s" % node)
10912

    
10913
  def Exec(self, feedback_fn):
10914
    """Execute the test delay opcode, with the wanted repetitions.
10915

10916
    """
    if self.op.repeat == 0:
      self._TestDelay()
    else:
      top_value = self.op.repeat - 1
      for i in range(self.op.repeat):
        self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
        self._TestDelay()


class LUTestJqueue(NoHooksLU):
  """Utility LU to test some aspects of the job queue.

  """
  REQ_BGL = False

  # Must be lower than default timeout for WaitForJobChange to see whether it
  # notices changed jobs
  _CLIENT_CONNECT_TIMEOUT = 20.0
  _CLIENT_CONFIRM_TIMEOUT = 60.0

  @classmethod
  def _NotifyUsingSocket(cls, cb, errcls):
    """Opens a Unix socket and waits for another program to connect.

    @type cb: callable
    @param cb: Callback to send socket name to client
    @type errcls: class
    @param errcls: Exception class to use for errors

    """
    # Using a temporary directory as there's no easy way to create temporary
    # sockets without writing a custom loop around tempfile.mktemp and
    # socket.bind
    tmpdir = tempfile.mkdtemp()
    try:
      tmpsock = utils.PathJoin(tmpdir, "sock")

      logging.debug("Creating temporary socket at %s", tmpsock)
      sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
      try:
        sock.bind(tmpsock)
        sock.listen(1)

        # Send details to client
        cb(tmpsock)

        # Wait for client to connect before continuing
        sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
        try:
          (conn, _) = sock.accept()
        except socket.error, err:
          raise errcls("Client didn't connect in time (%s)" % err)
      finally:
        sock.close()
    finally:
      # Remove as soon as client is connected
      shutil.rmtree(tmpdir)

    # Wait for client to close
    try:
      try:
        # pylint: disable-msg=E1101
        # Instance of '_socketobject' has no ... member
        conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
        conn.recv(1)
      except socket.error, err:
        raise errcls("Client failed to confirm notification (%s)" % err)
    finally:
      conn.close()

  def _SendNotification(self, test, arg, sockname):
    """Sends a notification to the client.

    @type test: string
    @param test: Test name
    @param arg: Test argument (depends on test)
    @type sockname: string
    @param sockname: Socket path

    """
    self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))

  def _Notify(self, prereq, test, arg):
    """Notifies the client of a test.

    @type prereq: bool
    @param prereq: Whether this is a prereq-phase test
    @type test: string
    @param test: Test name
    @param arg: Test argument (depends on test)

    """
    if prereq:
      errcls = errors.OpPrereqError
    else:
      errcls = errors.OpExecError

    return self._NotifyUsingSocket(compat.partial(self._SendNotification,
                                                  test, arg),
                                   errcls)

  def CheckArguments(self):
    self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
    self.expandnames_calls = 0

  def ExpandNames(self):
    checkargs_calls = getattr(self, "checkargs_calls", 0)
    if checkargs_calls < 1:
      raise errors.ProgrammerError("CheckArguments was not called")

    self.expandnames_calls += 1

    if self.op.notify_waitlock:
      self._Notify(True, constants.JQT_EXPANDNAMES, None)

    self.LogInfo("Expanding names")

    # Get lock on master node (just to get a lock, not for a particular reason)
    self.needed_locks = {
      locking.LEVEL_NODE: self.cfg.GetMasterNode(),
      }

  def Exec(self, feedback_fn):
    if self.expandnames_calls < 1:
      raise errors.ProgrammerError("ExpandNames was not called")

    if self.op.notify_exec:
      self._Notify(False, constants.JQT_EXEC, None)

    self.LogInfo("Executing")

    if self.op.log_messages:
      self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
      for idx, msg in enumerate(self.op.log_messages):
        self.LogInfo("Sending log message %s", idx + 1)
        feedback_fn(constants.JQT_MSGPREFIX + msg)
        # Report how many test messages have been sent
        self._Notify(False, constants.JQT_LOGMSG, idx + 1)

    if self.op.fail:
      raise errors.OpExecError("Opcode failure was requested")

    return True


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has the following sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in_text, in_data, out_text, out_data), that
      represent the input (to the external script) in text and data
      structure format, and the output from it, again in two formats
    - the result variables from the script (success, info, result) for
      easy usage

  """
  # pylint: disable-msg=R0902
  # lots of instance attributes
  _ALLO_KEYS = [
    "name", "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  _RELO_KEYS = [
    "name", "relocate_from",
    ]
  _EVAC_KEYS = [
    "evac_nodes",
    ]

  def __init__(self, cfg, rpc, mode, **kwargs):
    self.cfg = cfg
    self.rpc = rpc
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    self.name = None
    self.evac_nodes = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.result = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
      fn = self._AddNewInstance
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
      fn = self._AddRelocateInstance
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      keyset = self._EVAC_KEYS
      fn = self._AddEvacuateNodes
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])

    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData(fn)

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    ninfo = cfg.GetAllNodesInfo()
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_list = [n.name for n in ninfo.values() if n.vm_capable]

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      hypervisor_name = cluster_info.enabled_hypervisors[0]

    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
                                        hypervisor_name)
    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors)

    data["nodegroups"] = self._ComputeNodeGroupData(cfg)

    config_ndata = self._ComputeBasicNodeData(ninfo)
    data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
                                                 i_list, config_ndata)
    assert len(data["nodes"]) == len(ninfo), \
        "Incomplete node data computed"

    data["instances"] = self._ComputeInstanceData(cluster_info, i_list)

    self.in_data = data

  @staticmethod
  def _ComputeNodeGroupData(cfg):
    """Compute node groups data.

    """
    ng = {}
    for guuid, gdata in cfg.GetAllNodeGroupsInfo().items():
      ng[guuid] = {
        "name": gdata.name,
        "alloc_policy": gdata.alloc_policy,
        }
    return ng

  @staticmethod
  def _ComputeBasicNodeData(node_cfg):
    """Compute global node data.

    @rtype: dict
    @returns: a dict mapping node names to dicts of static (config-derived)
        node attributes

    """
    node_results = {}
    for ninfo in node_cfg.values():
      # fill in static (config-based) values
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        "group": ninfo.group,
        "master_capable": ninfo.master_capable,
        "vm_capable": ninfo.vm_capable,
        }

      node_results[ninfo.name] = pnr

    return node_results

  @staticmethod
  def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
                              node_results):
    """Compute global node data.

    @param node_results: the basic node structures as filled from the config

    """
    # make a copy of the current dict
    node_results = dict(node_results)
    for nname, nresult in node_data.items():
      assert nname in node_results, "Missing basic data for node %s" % nname
      ninfo = node_cfg[nname]

      if not (ninfo.offline or ninfo.drained):
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload

        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr_dyn.update(node_results[nname])
        node_results[nname] = pnr_dyn

    return node_results

  @staticmethod
  def _ComputeInstanceData(cluster_info, i_list):
    """Compute global instance data.

    """
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
        nic_dict = {"mac": nic.mac,
                    "ip": nic.ip,
                    "mode": filled_params[constants.NIC_MODE],
                    "link": filled_params[constants.NIC_LINK],
                   }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    return instance_data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    return request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node", errors.ECODE_STATE)

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    return request

  def _AddEvacuateNodes(self):
    """Add evacuate nodes data to allocator structure.

    """
    request = {
      "evac_nodes": self.evac_nodes
      }
    return request

  def _BuildInputData(self, fn):
    """Build input data structures.

    """
    self._ComputeClusterData()

    request = fn()
    request["type"] = self.mode
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["result"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'result' key"
                               " is not a list")
    self.out_data = rdict


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode of
    the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr, errors.ECODE_INVAL)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname, errors.ECODE_EXISTS)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'",
                                   errors.ECODE_INVAL)
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'",
                                   errors.ECODE_INVAL)
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
                                     " parameter", errors.ECODE_INVAL)
      if self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      fname = _ExpandInstanceName(self.cfg, self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      if not hasattr(self.op, "evac_nodes"):
        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
                                   " opcode input", errors.ECODE_INVAL)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode, errors.ECODE_INVAL)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name",
                                   errors.ECODE_INVAL)
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction, errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       evac_nodes=self.op.evac_nodes)
    else:
      raise errors.ProgrammerError("Unhandled mode %s in"
                                   " LUTestAllocator.Exec", self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result


#: Query type implementations
_QUERY_IMPL = {
  constants.QR_INSTANCE: _InstanceQuery,
  constants.QR_NODE: _NodeQuery,
  constants.QR_GROUP: _GroupQuery,
  }


def _GetQueryImplementation(name):
  """Returns the implementation for a query type.

  @param name: Query type, must be one of L{constants.QR_OP_QUERY}

  """
  try:
    return _QUERY_IMPL[name]
  except KeyError:
    raise errors.OpPrereqError("Unknown query resource '%s'" % name,
                               errors.ECODE_INVAL)