root / lib / cmdlib.py @ be3a4b14
1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module implementing the master-side code."""
23

    
24
# pylint: disable-msg=W0201,C0302
25

    
26
# W0201 since most LU attributes are defined in CheckPrereq or similar
27
# functions
28

    
29
# C0302: since we have way too many lines in this module
30

    
31
import os
32
import os.path
33
import time
34
import re
35
import platform
36
import logging
37
import copy
38
import OpenSSL
39
import socket
40
import tempfile
41
import shutil
42
import itertools
43

    
44
from ganeti import ssh
45
from ganeti import utils
46
from ganeti import errors
47
from ganeti import hypervisor
48
from ganeti import locking
49
from ganeti import constants
50
from ganeti import objects
51
from ganeti import serializer
52
from ganeti import ssconf
53
from ganeti import uidpool
54
from ganeti import compat
55
from ganeti import masterd
56
from ganeti import netutils
57
from ganeti import query
58
from ganeti import qlang
59
from ganeti import opcodes
60

    
61
import ganeti.masterd.instance # pylint: disable-msg=W0611
62

    
63

    
64
def _SupportsOob(cfg, node):
65
  """Tells if node supports OOB.
66

67
  @type cfg: L{config.ConfigWriter}
68
  @param cfg: The cluster configuration
69
  @type node: L{objects.Node}
70
  @param node: The node
71
  @return: The OOB script if supported or an empty string otherwise
72

73
  """
74
  return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
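
# Example (illustrative sketch, not part of the original module): callers
# typically guard OOB operations on the returned program path, e.g.:
#
#   oob_program = _SupportsOob(self.cfg, node)
#   if not oob_program:
#     raise errors.OpPrereqError("OOB is not supported by node %s" % node.name,
#                                errors.ECODE_STATE)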
75

    
76

    
77
# End types
78
class LogicalUnit(object):
79
  """Logical Unit base class.
80

81
  Subclasses must follow these rules:
82
    - implement ExpandNames
83
    - implement CheckPrereq (except when tasklets are used)
84
    - implement Exec (except when tasklets are used)
85
    - implement BuildHooksEnv
86
    - redefine HPATH and HTYPE
87
    - optionally redefine their run requirements:
88
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
89

90
  Note that all commands require root permissions.
91

92
  @ivar dry_run_result: the value (if any) that will be returned to the caller
93
      in dry-run mode (signalled by opcode dry_run parameter)
94

95
  """
96
  HPATH = None
97
  HTYPE = None
98
  REQ_BGL = True
99

    
100
  def __init__(self, processor, op, context, rpc):
101
    """Constructor for LogicalUnit.
102

103
    This needs to be overridden in derived classes in order to check op
104
    validity.
105

106
    """
107
    self.proc = processor
108
    self.op = op
109
    self.cfg = context.cfg
110
    self.context = context
111
    self.rpc = rpc
112
    # Dicts used to declare locking needs to mcpu
113
    self.needed_locks = None
114
    self.acquired_locks = {}
115
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
116
    self.add_locks = {}
117
    self.remove_locks = {}
118
    # Used to force good behavior when calling helper functions
119
    self.recalculate_locks = {}
120
    self.__ssh = None
121
    # logging
122
    self.Log = processor.Log # pylint: disable-msg=C0103
123
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
124
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
125
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
126
    # support for dry-run
127
    self.dry_run_result = None
128
    # support for generic debug attribute
129
    if (not hasattr(self.op, "debug_level") or
130
        not isinstance(self.op.debug_level, int)):
131
      self.op.debug_level = 0
132

    
133
    # Tasklets
134
    self.tasklets = None
135

    
136
    # Validate opcode parameters and set defaults
137
    self.op.Validate(True)
138

    
139
    self.CheckArguments()
140

    
141
  def __GetSSH(self):
142
    """Returns the SshRunner object
143

144
    """
145
    if not self.__ssh:
146
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
147
    return self.__ssh
148

    
149
  ssh = property(fget=__GetSSH)
150

    
151
  def CheckArguments(self):
152
    """Check syntactic validity for the opcode arguments.
153

154
    This method is for doing a simple syntactic check and ensure
155
    validity of opcode parameters, without any cluster-related
156
    checks. While the same can be accomplished in ExpandNames and/or
157
    CheckPrereq, doing these separately is better because:
158

159
      - ExpandNames is left purely as a lock-related function
160
      - CheckPrereq is run after we have acquired locks (and possibly
161
        waited for them)
162

163
    The function is allowed to change the self.op attribute so that
164
    later methods need no longer worry about missing parameters.
165

166
    """
167
    pass
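
  # Example (illustrative sketch): an LU whose opcode carries a hypothetical
  # "name" slot could normalize it here, without holding any locks:
  #
  #   def CheckArguments(self):
  #     if not self.op.name:
  #       raise errors.OpPrereqError("Empty name given", errors.ECODE_INVAL)
  #     self.op.name = self.op.name.lower()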
168

    
169
  def ExpandNames(self):
170
    """Expand names for this LU.
171

172
    This method is called before starting to execute the opcode, and it should
173
    update all the parameters of the opcode to their canonical form (e.g. a
174
    short node name must be fully expanded after this method has successfully
175
    completed). This way locking, hooks, logging, etc. can work correctly.
176

177
    LUs which implement this method must also populate the self.needed_locks
178
    member, as a dict with lock levels as keys, and a list of needed lock names
179
    as values. Rules:
180

181
      - use an empty dict if you don't need any lock
182
      - if you don't need any lock at a particular level omit that level
183
      - don't put anything for the BGL level
184
      - if you want all locks at a level use locking.ALL_SET as a value
185

186
    If you need to share locks (rather than acquire them exclusively) at one
187
    level you can modify self.share_locks, setting a true value (usually 1) for
188
    that level. By default locks are not shared.
189

190
    This function can also define a list of tasklets, which then will be
191
    executed in order instead of the usual LU-level CheckPrereq and Exec
192
    functions, if those are not defined by the LU.
193

194
    Examples::
195

196
      # Acquire all nodes and one instance
197
      self.needed_locks = {
198
        locking.LEVEL_NODE: locking.ALL_SET,
199
        locking.LEVEL_INSTANCE: ['instance1.example.com'],
200
      }
201
      # Acquire just two nodes
202
      self.needed_locks = {
203
        locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
204
      }
205
      # Acquire no locks
206
      self.needed_locks = {} # No, you can't leave it to the default value None
207

208
    """
209
    # The implementation of this method is mandatory only if the new LU is
210
    # concurrent, so that old LUs don't need to be changed all at the same
211
    # time.
212
    if self.REQ_BGL:
213
      self.needed_locks = {} # Exclusive LUs don't need locks.
214
    else:
215
      raise NotImplementedError
216

    
217
  def DeclareLocks(self, level):
218
    """Declare LU locking needs for a level
219

220
    While most LUs can just declare their locking needs at ExpandNames time,
221
    sometimes there's the need to calculate some locks after having acquired
222
    the ones before. This function is called just before acquiring locks at a
223
    particular level, but after acquiring the ones at lower levels, and permits
224
    such calculations. It can be used to modify self.needed_locks, and by
225
    default it does nothing.
226

227
    This function is only called if you have something already set in
228
    self.needed_locks for the level.
229

230
    @param level: Locking level which is going to be locked
231
    @type level: member of ganeti.locking.LEVELS
232

233
    """
234

    
235
  def CheckPrereq(self):
236
    """Check prerequisites for this LU.
237

238
    This method should check that the prerequisites for the execution
239
    of this LU are fulfilled. It can do internode communication, but
240
    it should be idempotent - no cluster or system changes are
241
    allowed.
242

243
    The method should raise errors.OpPrereqError in case something is
244
    not fulfilled. Its return value is ignored.
245

246
    This method should also update all the parameters of the opcode to
247
    their canonical form if it hasn't been done by ExpandNames before.
248

249
    """
250
    if self.tasklets is not None:
251
      for (idx, tl) in enumerate(self.tasklets):
252
        logging.debug("Checking prerequisites for tasklet %s/%s",
253
                      idx + 1, len(self.tasklets))
254
        tl.CheckPrereq()
255
    else:
256
      pass
257

    
258
  def Exec(self, feedback_fn):
259
    """Execute the LU.
260

261
    This method should implement the actual work. It should raise
262
    errors.OpExecError for failures that are somewhat dealt with in
263
    code, or expected.
264

265
    """
266
    if self.tasklets is not None:
267
      for (idx, tl) in enumerate(self.tasklets):
268
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
269
        tl.Exec(feedback_fn)
270
    else:
271
      raise NotImplementedError
272

    
273
  def BuildHooksEnv(self):
274
    """Build hooks environment for this LU.
275

276
    This method should return a three-node tuple consisting of: a dict
277
    containing the environment that will be used for running the
278
    specific hook for this LU, a list of node names on which the hook
279
    should run before the execution, and a list of node names on which
280
    the hook should run after the execution.
281

282
    The keys of the dict must not have 'GANETI_' prefixed as this will
283
    be handled in the hooks runner. Also note additional keys will be
284
    added by the hooks runner. If the LU doesn't define any
285
    environment, an empty dict (and not None) should be returned.
286

287
    No nodes should be returned as an empty list (and not None).
288

289
    Note that if the HPATH for a LU class is None, this function will
290
    not be called.
291

292
    """
293
    raise NotImplementedError
294

    
295
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
296
    """Notify the LU about the results of its hooks.
297

298
    This method is called every time a hooks phase is executed, and notifies
299
    the Logical Unit about the hooks' result. The LU can then use it to alter
300
    its result based on the hooks.  By default the method does nothing and the
301
    previous result is passed back unchanged but any LU can define it if it
302
    wants to use the local cluster hook-scripts somehow.
303

304
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
305
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
306
    @param hook_results: the results of the multi-node hooks rpc call
307
    @param feedback_fn: function used send feedback back to the caller
308
    @param lu_result: the previous Exec result this LU had, or None
309
        in the PRE phase
310
    @return: the new Exec result, based on the previous result
311
        and hook results
312

313
    """
314
    # API must be kept, thus we ignore the unused-argument and
315
    # could-be-a-function warnings
316
    # pylint: disable-msg=W0613,R0201
317
    return lu_result
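
  # Example (illustrative sketch): an LU that wants to react to failing
  # post-hooks could override HooksCallBack roughly as below; hook_results
  # maps node names to RPC results, and the fail_msg inspection shown here is
  # an assumption made for the example:
  #
  #   def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
  #     if phase == constants.HOOKS_PHASE_POST:
  #       for node, res in hook_results.items():
  #         if res.fail_msg:
  #           feedback_fn("Post-hook failed on node %s: %s" %
  #                       (node, res.fail_msg))
  #     return lu_result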
318

    
319
  def _ExpandAndLockInstance(self):
320
    """Helper function to expand and lock an instance.
321

322
    Many LUs that work on an instance take its name in self.op.instance_name
323
    and need to expand it and then declare the expanded name for locking. This
324
    function does it, and then updates self.op.instance_name to the expanded
325
    name. It also initializes needed_locks as a dict, if this hasn't been done
326
    before.
327

328
    """
329
    if self.needed_locks is None:
330
      self.needed_locks = {}
331
    else:
332
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
333
        "_ExpandAndLockInstance called with instance-level locks set"
334
    self.op.instance_name = _ExpandInstanceName(self.cfg,
335
                                                self.op.instance_name)
336
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
337

    
338
  def _LockInstancesNodes(self, primary_only=False):
339
    """Helper function to declare instances' nodes for locking.
340

341
    This function should be called after locking one or more instances to lock
342
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
343
    with all primary or secondary nodes for instances already locked and
344
    present in self.needed_locks[locking.LEVEL_INSTANCE].
345

346
    It should be called from DeclareLocks, and for safety only works if
347
    self.recalculate_locks[locking.LEVEL_NODE] is set.
348

349
    In the future it may grow parameters to just lock some instance's nodes, or
350
    to just lock primary or secondary nodes, if needed.
351

352
    It should be called in DeclareLocks in a way similar to::
353

354
      if level == locking.LEVEL_NODE:
355
        self._LockInstancesNodes()
356

357
    @type primary_only: boolean
358
    @param primary_only: only lock primary nodes of locked instances
359

360
    """
361
    assert locking.LEVEL_NODE in self.recalculate_locks, \
362
      "_LockInstancesNodes helper function called with no nodes to recalculate"
363

    
364
    # TODO: check whether we've really been called with the instance locks held
365

    
366
    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
367
    # future we might want to have different behaviors depending on the value
368
    # of self.recalculate_locks[locking.LEVEL_NODE]
369
    wanted_nodes = []
370
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
371
      instance = self.context.cfg.GetInstanceInfo(instance_name)
372
      wanted_nodes.append(instance.primary_node)
373
      if not primary_only:
374
        wanted_nodes.extend(instance.secondary_nodes)
375

    
376
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
377
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
378
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
379
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
380

    
381
    del self.recalculate_locks[locking.LEVEL_NODE]
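

# Example (illustrative sketch, not part of the original module): a minimal
# concurrent LU tying together ExpandNames, DeclareLocks/_LockInstancesNodes,
# CheckPrereq and Exec for a single instance. The "instance_name" opcode slot
# and the log message are assumptions made for the example only; the real LUs
# are defined further down in this module.
class _ExampleInstanceNoopLU(LogicalUnit):
  """Illustrative LU that locks one instance and its nodes, then does nothing.

  """
  # Runs no hooks, so HPATH/HTYPE stay None; does not need the BGL
  REQ_BGL = False

  def ExpandNames(self):
    # Canonicalize self.op.instance_name and declare the instance-level lock
    self._ExpandAndLockInstance()
    # Node locks are computed in DeclareLocks, once the instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      # Lock the primary and secondary nodes of the locked instance
      self._LockInstancesNodes()

  def CheckPrereq(self):
    # Idempotent checks only; raise OpPrereqError if something is wrong
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    if self.instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name, errors.ECODE_NOENT)

  def Exec(self, feedback_fn):
    feedback_fn("Would operate on instance %s" % self.instance.name)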
382

    
383

    
384
class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
385
  """Simple LU which runs no hooks.
386

387
  This LU is intended as a parent for other LogicalUnits which will
388
  run no hooks, in order to reduce duplicate code.
389

390
  """
391
  HPATH = None
392
  HTYPE = None
393

    
394
  def BuildHooksEnv(self):
395
    """Empty BuildHooksEnv for NoHooksLu.
396

397
    This just raises an error.
398

399
    """
400
    assert False, "BuildHooksEnv called for NoHooksLUs"
401

    
402

    
403
class Tasklet:
404
  """Tasklet base class.
405

406
  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
407
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
408
  tasklets know nothing about locks.
409

410
  Subclasses must follow these rules:
411
    - Implement CheckPrereq
412
    - Implement Exec
413

414
  """
415
  def __init__(self, lu):
416
    self.lu = lu
417

    
418
    # Shortcuts
419
    self.cfg = lu.cfg
420
    self.rpc = lu.rpc
421

    
422
  def CheckPrereq(self):
423
    """Check prerequisites for this tasklets.
424

425
    This method should check whether the prerequisites for the execution of
426
    this tasklet are fulfilled. It can do internode communication, but it
427
    should be idempotent - no cluster or system changes are allowed.
428

429
    The method should raise errors.OpPrereqError in case something is not
430
    fulfilled. Its return value is ignored.
431

432
    This method should also update all parameters to their canonical form if it
433
    hasn't been done before.
434

435
    """
436
    pass
437

    
438
  def Exec(self, feedback_fn):
439
    """Execute the tasklet.
440

441
    This method should implement the actual work. It should raise
442
    errors.OpExecError for failures that are somewhat dealt with in code, or
443
    expected.
444

445
    """
446
    raise NotImplementedError
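

# Example (illustrative sketch, not part of the original module): a trivial
# tasklet and how an LU would wire tasklets up in ExpandNames instead of
# implementing its own CheckPrereq/Exec. The attribute names used below are
# assumptions made for the example.
class _ExampleNoopTasklet(Tasklet):
  """Illustrative tasklet that only verifies an instance exists.

  """
  def __init__(self, lu, instance_name):
    Tasklet.__init__(self, lu)
    self.instance_name = instance_name

  def CheckPrereq(self):
    if self.cfg.GetInstanceInfo(self.instance_name) is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.instance_name, errors.ECODE_NOENT)

  def Exec(self, feedback_fn):
    feedback_fn("Nothing to do for %s" % self.instance_name)

# Inside an LU, tasklets would be declared in ExpandNames, e.g.:
#
#   self.tasklets = [_ExampleNoopTasklet(self, name)
#                    for name in self.op.instances]
#
# The default LogicalUnit.CheckPrereq and Exec then iterate over them.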
447

    
448

    
449
class _QueryBase:
450
  """Base for query utility classes.
451

452
  """
453
  #: Attribute holding field definitions
454
  FIELDS = None
455

    
456
  def __init__(self, filter_, fields, use_locking):
457
    """Initializes this class.
458

459
    """
460
    self.use_locking = use_locking
461

    
462
    self.query = query.Query(self.FIELDS, fields, filter_=filter_,
463
                             namefield="name")
464
    self.requested_data = self.query.RequestedData()
465
    self.names = self.query.RequestedNames()
466

    
467
    # Sort only if no names were requested
468
    self.sort_by_name = not self.names
469

    
470
    self.do_locking = None
471
    self.wanted = None
472

    
473
  def _GetNames(self, lu, all_names, lock_level):
474
    """Helper function to determine names asked for in the query.
475

476
    """
477
    if self.do_locking:
478
      names = lu.acquired_locks[lock_level]
479
    else:
480
      names = all_names
481

    
482
    if self.wanted == locking.ALL_SET:
483
      assert not self.names
484
      # caller didn't specify names, so ordering is not important
485
      return utils.NiceSort(names)
486

    
487
    # caller specified names and we must keep the same order
488
    assert self.names
489
    assert not self.do_locking or lu.acquired_locks[lock_level]
490

    
491
    missing = set(self.wanted).difference(names)
492
    if missing:
493
      raise errors.OpExecError("Some items were removed before retrieving"
494
                               " their data: %s" % missing)
495

    
496
    # Return expanded names
497
    return self.wanted
498

    
499
  @classmethod
500
  def FieldsQuery(cls, fields):
501
    """Returns list of available fields.
502

503
    @return: List of L{objects.QueryFieldDefinition}
504

505
    """
506
    return query.QueryFields(cls.FIELDS, fields)
507

    
508
  def ExpandNames(self, lu):
509
    """Expand names for this query.
510

511
    See L{LogicalUnit.ExpandNames}.
512

513
    """
514
    raise NotImplementedError()
515

    
516
  def DeclareLocks(self, lu, level):
517
    """Declare locks for this query.
518

519
    See L{LogicalUnit.DeclareLocks}.
520

521
    """
522
    raise NotImplementedError()
523

    
524
  def _GetQueryData(self, lu):
525
    """Collects all data for this query.
526

527
    @return: Query data object
528

529
    """
530
    raise NotImplementedError()
531

    
532
  def NewStyleQuery(self, lu):
533
    """Collect data and execute query.
534

535
    """
536
    return query.GetQueryResponse(self.query, self._GetQueryData(lu),
537
                                  sort_by_name=self.sort_by_name)
538

    
539
  def OldStyleQuery(self, lu):
540
    """Collect data and execute query.
541

542
    """
543
    return self.query.OldStyleQuery(self._GetQueryData(lu),
544
                                    sort_by_name=self.sort_by_name)
545

    
546

    
547
def _GetWantedNodes(lu, nodes):
548
  """Returns list of checked and expanded node names.
549

550
  @type lu: L{LogicalUnit}
551
  @param lu: the logical unit on whose behalf we execute
552
  @type nodes: list
553
  @param nodes: list of node names or None for all nodes
554
  @rtype: list
555
  @return: the list of nodes, sorted
556
  @raise errors.ProgrammerError: if the nodes parameter is wrong type
557

558
  """
559
  if nodes:
560
    return [_ExpandNodeName(lu.cfg, name) for name in nodes]
561

    
562
  return utils.NiceSort(lu.cfg.GetNodeList())
563

    
564

    
565
def _GetWantedInstances(lu, instances):
566
  """Returns list of checked and expanded instance names.
567

568
  @type lu: L{LogicalUnit}
569
  @param lu: the logical unit on whose behalf we execute
570
  @type instances: list
571
  @param instances: list of instance names or None for all instances
572
  @rtype: list
573
  @return: the list of instances, sorted
574
  @raise errors.OpPrereqError: if the instances parameter is wrong type
575
  @raise errors.OpPrereqError: if any of the passed instances is not found
576

577
  """
578
  if instances:
579
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
580
  else:
581
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
582
  return wanted
583

    
584

    
585
def _GetUpdatedParams(old_params, update_dict,
586
                      use_default=True, use_none=False):
587
  """Return the new version of a parameter dictionary.
588

589
  @type old_params: dict
590
  @param old_params: old parameters
591
  @type update_dict: dict
592
  @param update_dict: dict containing new parameter values, or
593
      constants.VALUE_DEFAULT to reset the parameter to its default
594
      value
595
  @type use_default: boolean
596
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
597
      values as 'to be deleted' values
598
  @type use_none: boolean
599
  @param use_none: whether to recognise C{None} values as 'to be
600
      deleted' values
601
  @rtype: dict
602
  @return: the new parameter dictionary
603

604
  """
605
  params_copy = copy.deepcopy(old_params)
606
  for key, val in update_dict.iteritems():
607
    if ((use_default and val == constants.VALUE_DEFAULT) or
608
        (use_none and val is None)):
609
      try:
610
        del params_copy[key]
611
      except KeyError:
612
        pass
613
    else:
614
      params_copy[key] = val
615
  return params_copy
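
# Example (illustrative): given the semantics above,
#
#   _GetUpdatedParams({"vcpus": 2, "memory": 512},
#                     {"memory": constants.VALUE_DEFAULT, "auto_balance": True})
#
# returns {"vcpus": 2, "auto_balance": True}: "memory" is reset to its default
# (i.e. removed from the resulting dict) and "auto_balance" is added. With
# use_none=True, passing None for a key removes it in the same way.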
616

    
617

    
618
def _CheckOutputFields(static, dynamic, selected):
619
  """Checks whether all selected fields are valid.
620

621
  @type static: L{utils.FieldSet}
622
  @param static: static fields set
623
  @type dynamic: L{utils.FieldSet}
624
  @param dynamic: dynamic fields set
625

626
  """
627
  f = utils.FieldSet()
628
  f.Extend(static)
629
  f.Extend(dynamic)
630

    
631
  delta = f.NonMatching(selected)
632
  if delta:
633
    raise errors.OpPrereqError("Unknown output fields selected: %s"
634
                               % ",".join(delta), errors.ECODE_INVAL)
635

    
636

    
637
def _CheckGlobalHvParams(params):
638
  """Validates that given hypervisor params are not global ones.
639

640
  This will ensure that instances don't get customised versions of
641
  global params.
642

643
  """
644
  used_globals = constants.HVC_GLOBALS.intersection(params)
645
  if used_globals:
646
    msg = ("The following hypervisor parameters are global and cannot"
647
           " be customized at instance level, please modify them at"
648
           " cluster level: %s" % utils.CommaJoin(used_globals))
649
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
650

    
651

    
652
def _CheckNodeOnline(lu, node, msg=None):
653
  """Ensure that a given node is online.
654

655
  @param lu: the LU on behalf of which we make the check
656
  @param node: the node to check
657
  @param msg: if passed, should be a message to replace the default one
658
  @raise errors.OpPrereqError: if the node is offline
659

660
  """
661
  if msg is None:
662
    msg = "Can't use offline node"
663
  if lu.cfg.GetNodeInfo(node).offline:
664
    raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)
665

    
666

    
667
def _CheckNodeNotDrained(lu, node):
668
  """Ensure that a given node is not drained.
669

670
  @param lu: the LU on behalf of which we make the check
671
  @param node: the node to check
672
  @raise errors.OpPrereqError: if the node is drained
673

674
  """
675
  if lu.cfg.GetNodeInfo(node).drained:
676
    raise errors.OpPrereqError("Can't use drained node %s" % node,
677
                               errors.ECODE_STATE)
678

    
679

    
680
def _CheckNodeVmCapable(lu, node):
681
  """Ensure that a given node is vm capable.
682

683
  @param lu: the LU on behalf of which we make the check
684
  @param node: the node to check
685
  @raise errors.OpPrereqError: if the node is not vm capable
686

687
  """
688
  if not lu.cfg.GetNodeInfo(node).vm_capable:
689
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
690
                               errors.ECODE_STATE)
691

    
692

    
693
def _CheckNodeHasOS(lu, node, os_name, force_variant):
694
  """Ensure that a node supports a given OS.
695

696
  @param lu: the LU on behalf of which we make the check
697
  @param node: the node to check
698
  @param os_name: the OS to query about
699
  @param force_variant: whether to ignore variant errors
700
  @raise errors.OpPrereqError: if the node does not support the OS
701

702
  """
703
  result = lu.rpc.call_os_get(node, os_name)
704
  result.Raise("OS '%s' not in supported OS list for node %s" %
705
               (os_name, node),
706
               prereq=True, ecode=errors.ECODE_INVAL)
707
  if not force_variant:
708
    _CheckOSVariant(result.payload, os_name)
709

    
710

    
711
def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
712
  """Ensure that a node has the given secondary ip.
713

714
  @type lu: L{LogicalUnit}
715
  @param lu: the LU on behalf of which we make the check
716
  @type node: string
717
  @param node: the node to check
718
  @type secondary_ip: string
719
  @param secondary_ip: the ip to check
720
  @type prereq: boolean
721
  @param prereq: whether to throw a prerequisite or an execute error
722
  @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
723
  @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False
724

725
  """
726
  result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
727
  result.Raise("Failure checking secondary ip on node %s" % node,
728
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
729
  if not result.payload:
730
    msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
731
           " please fix and re-run this command" % secondary_ip)
732
    if prereq:
733
      raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
734
    else:
735
      raise errors.OpExecError(msg)
736

    
737

    
738
def _GetClusterDomainSecret():
739
  """Reads the cluster domain secret.
740

741
  """
742
  return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
743
                               strict=True)
744

    
745

    
746
def _CheckInstanceDown(lu, instance, reason):
747
  """Ensure that an instance is not running."""
748
  if instance.admin_up:
749
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
750
                               (instance.name, reason), errors.ECODE_STATE)
751

    
752
  pnode = instance.primary_node
753
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
754
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
755
              prereq=True, ecode=errors.ECODE_ENVIRON)
756

    
757
  if instance.name in ins_l.payload:
758
    raise errors.OpPrereqError("Instance %s is running, %s" %
759
                               (instance.name, reason), errors.ECODE_STATE)
760

    
761

    
762
def _ExpandItemName(fn, name, kind):
763
  """Expand an item name.
764

765
  @param fn: the function to use for expansion
766
  @param name: requested item name
767
  @param kind: text description ('Node' or 'Instance')
768
  @return: the resolved (full) name
769
  @raise errors.OpPrereqError: if the item is not found
770

771
  """
772
  full_name = fn(name)
773
  if full_name is None:
774
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
775
                               errors.ECODE_NOENT)
776
  return full_name
777

    
778

    
779
def _ExpandNodeName(cfg, name):
780
  """Wrapper over L{_ExpandItemName} for nodes."""
781
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
782

    
783

    
784
def _ExpandInstanceName(cfg, name):
785
  """Wrapper over L{_ExpandItemName} for instance."""
786
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
787

    
788

    
789
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
790
                          memory, vcpus, nics, disk_template, disks,
791
                          bep, hvp, hypervisor_name):
792
  """Builds instance related env variables for hooks
793

794
  This builds the hook environment from individual variables.
795

796
  @type name: string
797
  @param name: the name of the instance
798
  @type primary_node: string
799
  @param primary_node: the name of the instance's primary node
800
  @type secondary_nodes: list
801
  @param secondary_nodes: list of secondary nodes as strings
802
  @type os_type: string
803
  @param os_type: the name of the instance's OS
804
  @type status: boolean
805
  @param status: the should_run status of the instance
806
  @type memory: string
807
  @param memory: the memory size of the instance
808
  @type vcpus: string
809
  @param vcpus: the count of VCPUs the instance has
810
  @type nics: list
811
  @param nics: list of tuples (ip, mac, mode, link) representing
812
      the NICs the instance has
813
  @type disk_template: string
814
  @param disk_template: the disk template of the instance
815
  @type disks: list
816
  @param disks: the list of (size, mode) pairs
817
  @type bep: dict
818
  @param bep: the backend parameters for the instance
819
  @type hvp: dict
820
  @param hvp: the hypervisor parameters for the instance
821
  @type hypervisor_name: string
822
  @param hypervisor_name: the hypervisor for the instance
823
  @rtype: dict
824
  @return: the hook environment for this instance
825

826
  """
827
  if status:
828
    str_status = "up"
829
  else:
830
    str_status = "down"
831
  env = {
832
    "OP_TARGET": name,
833
    "INSTANCE_NAME": name,
834
    "INSTANCE_PRIMARY": primary_node,
835
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
836
    "INSTANCE_OS_TYPE": os_type,
837
    "INSTANCE_STATUS": str_status,
838
    "INSTANCE_MEMORY": memory,
839
    "INSTANCE_VCPUS": vcpus,
840
    "INSTANCE_DISK_TEMPLATE": disk_template,
841
    "INSTANCE_HYPERVISOR": hypervisor_name,
842
  }
843

    
844
  if nics:
845
    nic_count = len(nics)
846
    for idx, (ip, mac, mode, link) in enumerate(nics):
847
      if ip is None:
848
        ip = ""
849
      env["INSTANCE_NIC%d_IP" % idx] = ip
850
      env["INSTANCE_NIC%d_MAC" % idx] = mac
851
      env["INSTANCE_NIC%d_MODE" % idx] = mode
852
      env["INSTANCE_NIC%d_LINK" % idx] = link
853
      if mode == constants.NIC_MODE_BRIDGED:
854
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
855
  else:
856
    nic_count = 0
857

    
858
  env["INSTANCE_NIC_COUNT"] = nic_count
859

    
860
  if disks:
861
    disk_count = len(disks)
862
    for idx, (size, mode) in enumerate(disks):
863
      env["INSTANCE_DISK%d_SIZE" % idx] = size
864
      env["INSTANCE_DISK%d_MODE" % idx] = mode
865
  else:
866
    disk_count = 0
867

    
868
  env["INSTANCE_DISK_COUNT"] = disk_count
869

    
870
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
871
    for key, value in source.items():
872
      env["INSTANCE_%s_%s" % (kind, key)] = value
873

    
874
  return env
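
# Example (illustrative): for an instance with one bridged NIC and one disk,
# the resulting environment contains entries along these lines (all values
# made up for the example):
#
#   OP_TARGET=inst1.example.com
#   INSTANCE_PRIMARY=node1.example.com
#   INSTANCE_SECONDARIES=node2.example.com
#   INSTANCE_STATUS=up
#   INSTANCE_NIC_COUNT=1
#   INSTANCE_NIC0_MODE=bridged
#   INSTANCE_NIC0_BRIDGE=br0
#   INSTANCE_DISK_COUNT=1
#   INSTANCE_DISK0_SIZE=10240
#   INSTANCE_BE_memory=512
#
# The hooks runner later adds the GANETI_ prefix to each key, as noted in
# L{LogicalUnit.BuildHooksEnv}.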
875

    
876

    
877
def _NICListToTuple(lu, nics):
878
  """Build a list of nic information tuples.
879

880
  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
881
  value in LUInstanceQueryData.
882

883
  @type lu:  L{LogicalUnit}
884
  @param lu: the logical unit on whose behalf we execute
885
  @type nics: list of L{objects.NIC}
886
  @param nics: list of nics to convert to hooks tuples
887

888
  """
889
  hooks_nics = []
890
  cluster = lu.cfg.GetClusterInfo()
891
  for nic in nics:
892
    ip = nic.ip
893
    mac = nic.mac
894
    filled_params = cluster.SimpleFillNIC(nic.nicparams)
895
    mode = filled_params[constants.NIC_MODE]
896
    link = filled_params[constants.NIC_LINK]
897
    hooks_nics.append((ip, mac, mode, link))
898
  return hooks_nics
899

    
900

    
901
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
902
  """Builds instance related env variables for hooks from an object.
903

904
  @type lu: L{LogicalUnit}
905
  @param lu: the logical unit on whose behalf we execute
906
  @type instance: L{objects.Instance}
907
  @param instance: the instance for which we should build the
908
      environment
909
  @type override: dict
910
  @param override: dictionary with key/values that will override
911
      our values
912
  @rtype: dict
913
  @return: the hook environment dictionary
914

915
  """
916
  cluster = lu.cfg.GetClusterInfo()
917
  bep = cluster.FillBE(instance)
918
  hvp = cluster.FillHV(instance)
919
  args = {
920
    'name': instance.name,
921
    'primary_node': instance.primary_node,
922
    'secondary_nodes': instance.secondary_nodes,
923
    'os_type': instance.os,
924
    'status': instance.admin_up,
925
    'memory': bep[constants.BE_MEMORY],
926
    'vcpus': bep[constants.BE_VCPUS],
927
    'nics': _NICListToTuple(lu, instance.nics),
928
    'disk_template': instance.disk_template,
929
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
930
    'bep': bep,
931
    'hvp': hvp,
932
    'hypervisor_name': instance.hypervisor,
933
  }
934
  if override:
935
    args.update(override)
936
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142
937

    
938

    
939
def _AdjustCandidatePool(lu, exceptions):
940
  """Adjust the candidate pool after node operations.
941

942
  """
943
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
944
  if mod_list:
945
    lu.LogInfo("Promoted nodes to master candidate role: %s",
946
               utils.CommaJoin(node.name for node in mod_list))
947
    for name in mod_list:
948
      lu.context.ReaddNode(name)
949
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
950
  if mc_now > mc_max:
951
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
952
               (mc_now, mc_max))
953

    
954

    
955
def _DecideSelfPromotion(lu, exceptions=None):
956
  """Decide whether I should promote myself as a master candidate.
957

958
  """
959
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
960
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
961
  # the new node will increase mc_max by one, so:
962
  mc_should = min(mc_should + 1, cp_size)
963
  return mc_now < mc_should
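
# Example (illustrative): with candidate_pool_size = 10, mc_now = 3 current
# candidates and GetMasterCandidateStats reporting that 4 should exist, adding
# this node gives mc_should = min(4 + 1, 10) = 5; since 3 < 5, the new node
# promotes itself to master candidate.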
964

    
965

    
966
def _CheckNicsBridgesExist(lu, target_nics, target_node):
967
  """Check that the brigdes needed by a list of nics exist.
968

969
  """
970
  cluster = lu.cfg.GetClusterInfo()
971
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
972
  brlist = [params[constants.NIC_LINK] for params in paramslist
973
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
974
  if brlist:
975
    result = lu.rpc.call_bridges_exist(target_node, brlist)
976
    result.Raise("Error checking bridges on destination node '%s'" %
977
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
978

    
979

    
980
def _CheckInstanceBridgesExist(lu, instance, node=None):
981
  """Check that the brigdes needed by an instance exist.
982

983
  """
984
  if node is None:
985
    node = instance.primary_node
986
  _CheckNicsBridgesExist(lu, instance.nics, node)
987

    
988

    
989
def _CheckOSVariant(os_obj, name):
990
  """Check whether an OS name conforms to the os variants specification.
991

992
  @type os_obj: L{objects.OS}
993
  @param os_obj: OS object to check
994
  @type name: string
995
  @param name: OS name passed by the user, to check for validity
996

997
  """
998
  if not os_obj.supported_variants:
999
    return
1000
  variant = objects.OS.GetVariant(name)
1001
  if not variant:
1002
    raise errors.OpPrereqError("OS name must include a variant",
1003
                               errors.ECODE_INVAL)
1004

    
1005
  if variant not in os_obj.supported_variants:
1006
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
1007

    
1008

    
1009
def _GetNodeInstancesInner(cfg, fn):
1010
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
1011

    
1012

    
1013
def _GetNodeInstances(cfg, node_name):
1014
  """Returns a list of all primary and secondary instances on a node.
1015

1016
  """
1017

    
1018
  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
1019

    
1020

    
1021
def _GetNodePrimaryInstances(cfg, node_name):
1022
  """Returns primary instances on a node.
1023

1024
  """
1025
  return _GetNodeInstancesInner(cfg,
1026
                                lambda inst: node_name == inst.primary_node)
1027

    
1028

    
1029
def _GetNodeSecondaryInstances(cfg, node_name):
1030
  """Returns secondary instances on a node.
1031

1032
  """
1033
  return _GetNodeInstancesInner(cfg,
1034
                                lambda inst: node_name in inst.secondary_nodes)
1035

    
1036

    
1037
def _GetStorageTypeArgs(cfg, storage_type):
1038
  """Returns the arguments for a storage type.
1039

1040
  """
1041
  # Special case for file storage
1042
  if storage_type == constants.ST_FILE:
1043
    # storage.FileStorage wants a list of storage directories
1044
    return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]
1045

    
1046
  return []
1047

    
1048

    
1049
def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
1050
  faulty = []
1051

    
1052
  for dev in instance.disks:
1053
    cfg.SetDiskID(dev, node_name)
1054

    
1055
  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
1056
  result.Raise("Failed to get disk status from node %s" % node_name,
1057
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
1058

    
1059
  for idx, bdev_status in enumerate(result.payload):
1060
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
1061
      faulty.append(idx)
1062

    
1063
  return faulty
1064

    
1065

    
1066
def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
1067
  """Check the sanity of iallocator and node arguments and use the
1068
  cluster-wide iallocator if appropriate.
1069

1070
  Check that at most one of (iallocator, node) is specified. If none is
1071
  specified, then the LU's opcode's iallocator slot is filled with the
1072
  cluster-wide default iallocator.
1073

1074
  @type iallocator_slot: string
1075
  @param iallocator_slot: the name of the opcode iallocator slot
1076
  @type node_slot: string
1077
  @param node_slot: the name of the opcode target node slot
1078

1079
  """
1080
  node = getattr(lu.op, node_slot, None)
1081
  iallocator = getattr(lu.op, iallocator_slot, None)
1082

    
1083
  if node is not None and iallocator is not None:
1084
    raise errors.OpPrereqError("Do not specify both, iallocator and node.",
1085
                               errors.ECODE_INVAL)
1086
  elif node is None and iallocator is None:
1087
    default_iallocator = lu.cfg.GetDefaultIAllocator()
1088
    if default_iallocator:
1089
      setattr(lu.op, iallocator_slot, default_iallocator)
1090
    else:
1091
      raise errors.OpPrereqError("No iallocator or node given and no"
1092
                                 " cluster-wide default iallocator found."
1093
                                 " Please specify either an iallocator or a"
1094
                                 " node, or set a cluster-wide default"
1095
                                 " iallocator.")
1096

    
1097

    
1098
class LUClusterPostInit(LogicalUnit):
1099
  """Logical unit for running hooks after cluster initialization.
1100

1101
  """
1102
  HPATH = "cluster-init"
1103
  HTYPE = constants.HTYPE_CLUSTER
1104

    
1105
  def BuildHooksEnv(self):
1106
    """Build hooks env.
1107

1108
    """
1109
    env = {"OP_TARGET": self.cfg.GetClusterName()}
1110
    mn = self.cfg.GetMasterNode()
1111
    return env, [], [mn]
1112

    
1113
  def Exec(self, feedback_fn):
1114
    """Nothing to do.
1115

1116
    """
1117
    return True
1118

    
1119

    
1120
class LUClusterDestroy(LogicalUnit):
1121
  """Logical unit for destroying the cluster.
1122

1123
  """
1124
  HPATH = "cluster-destroy"
1125
  HTYPE = constants.HTYPE_CLUSTER
1126

    
1127
  def BuildHooksEnv(self):
1128
    """Build hooks env.
1129

1130
    """
1131
    env = {"OP_TARGET": self.cfg.GetClusterName()}
1132
    return env, [], []
1133

    
1134
  def CheckPrereq(self):
1135
    """Check prerequisites.
1136

1137
    This checks whether the cluster is empty.
1138

1139
    Any errors are signaled by raising errors.OpPrereqError.
1140

1141
    """
1142
    master = self.cfg.GetMasterNode()
1143

    
1144
    nodelist = self.cfg.GetNodeList()
1145
    if len(nodelist) != 1 or nodelist[0] != master:
1146
      raise errors.OpPrereqError("There are still %d node(s) in"
1147
                                 " this cluster." % (len(nodelist) - 1),
1148
                                 errors.ECODE_INVAL)
1149
    instancelist = self.cfg.GetInstanceList()
1150
    if instancelist:
1151
      raise errors.OpPrereqError("There are still %d instance(s) in"
1152
                                 " this cluster." % len(instancelist),
1153
                                 errors.ECODE_INVAL)
1154

    
1155
  def Exec(self, feedback_fn):
1156
    """Destroys the cluster.
1157

1158
    """
1159
    master = self.cfg.GetMasterNode()
1160

    
1161
    # Run post hooks on master node before it's removed
1162
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
1163
    try:
1164
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
1165
    except:
1166
      # pylint: disable-msg=W0702
1167
      self.LogWarning("Errors occurred running hooks on %s" % master)
1168

    
1169
    result = self.rpc.call_node_stop_master(master, False)
1170
    result.Raise("Could not disable the master role")
1171

    
1172
    return master
1173

    
1174

    
1175
def _VerifyCertificate(filename):
1176
  """Verifies a certificate for LUClusterVerify.
1177

1178
  @type filename: string
1179
  @param filename: Path to PEM file
1180

1181
  """
1182
  try:
1183
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1184
                                           utils.ReadFile(filename))
1185
  except Exception, err: # pylint: disable-msg=W0703
1186
    return (LUClusterVerify.ETYPE_ERROR,
1187
            "Failed to load X509 certificate %s: %s" % (filename, err))
1188

    
1189
  (errcode, msg) = \
1190
    utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
1191
                                constants.SSL_CERT_EXPIRATION_ERROR)
1192

    
1193
  if msg:
1194
    fnamemsg = "While verifying %s: %s" % (filename, msg)
1195
  else:
1196
    fnamemsg = None
1197

    
1198
  if errcode is None:
1199
    return (None, fnamemsg)
1200
  elif errcode == utils.CERT_WARNING:
1201
    return (LUClusterVerify.ETYPE_WARNING, fnamemsg)
1202
  elif errcode == utils.CERT_ERROR:
1203
    return (LUClusterVerify.ETYPE_ERROR, fnamemsg)
1204

    
1205
  raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
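
# Example (illustrative): the possible return values of _VerifyCertificate are
#
#   (None, None)                          - the certificate verifies cleanly
#   (LUClusterVerify.ETYPE_WARNING, msg)  - e.g. the certificate expires soon
#   (LUClusterVerify.ETYPE_ERROR, msg)    - unreadable, malformed or expired
#
# where msg describes the problem (either the load failure or the
# "While verifying <filename>: ..." text built above).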
1206

    
1207

    
1208
class LUClusterVerify(LogicalUnit):
1209
  """Verifies the cluster status.
1210

1211
  """
1212
  HPATH = "cluster-verify"
1213
  HTYPE = constants.HTYPE_CLUSTER
1214
  REQ_BGL = False
1215

    
1216
  TCLUSTER = "cluster"
1217
  TNODE = "node"
1218
  TINSTANCE = "instance"
1219

    
1220
  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
1221
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
1222
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
1223
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
1224
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
1225
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
1226
  EINSTANCEFAULTYDISK = (TINSTANCE, "EINSTANCEFAULTYDISK")
1227
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
1228
  EINSTANCESPLITGROUPS = (TINSTANCE, "EINSTANCESPLITGROUPS")
1229
  ENODEDRBD = (TNODE, "ENODEDRBD")
1230
  ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
1231
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
1232
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
1233
  ENODEHV = (TNODE, "ENODEHV")
1234
  ENODELVM = (TNODE, "ENODELVM")
1235
  ENODEN1 = (TNODE, "ENODEN1")
1236
  ENODENET = (TNODE, "ENODENET")
1237
  ENODEOS = (TNODE, "ENODEOS")
1238
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
1239
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
1240
  ENODERPC = (TNODE, "ENODERPC")
1241
  ENODESSH = (TNODE, "ENODESSH")
1242
  ENODEVERSION = (TNODE, "ENODEVERSION")
1243
  ENODESETUP = (TNODE, "ENODESETUP")
1244
  ENODETIME = (TNODE, "ENODETIME")
1245
  ENODEOOBPATH = (TNODE, "ENODEOOBPATH")
1246

    
1247
  ETYPE_FIELD = "code"
1248
  ETYPE_ERROR = "ERROR"
1249
  ETYPE_WARNING = "WARNING"
1250

    
1251
  _HOOKS_INDENT_RE = re.compile("^", re.M)
1252

    
1253
  class NodeImage(object):
1254
    """A class representing the logical and physical status of a node.
1255

1256
    @type name: string
1257
    @ivar name: the node name to which this object refers
1258
    @ivar volumes: a structure as returned from
1259
        L{ganeti.backend.GetVolumeList} (runtime)
1260
    @ivar instances: a list of running instances (runtime)
1261
    @ivar pinst: list of configured primary instances (config)
1262
    @ivar sinst: list of configured secondary instances (config)
1263
    @ivar sbp: dictionary of {primary-node: list of instances} for all
1264
        instances for which this node is secondary (config)
1265
    @ivar mfree: free memory, as reported by hypervisor (runtime)
1266
    @ivar dfree: free disk, as reported by the node (runtime)
1267
    @ivar offline: the offline status (config)
1268
    @type rpc_fail: boolean
1269
    @ivar rpc_fail: whether the RPC verify call was successful (overall,
1270
        not whether the individual keys were correct) (runtime)
1271
    @type lvm_fail: boolean
1272
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
1273
    @type hyp_fail: boolean
1274
    @ivar hyp_fail: whether the RPC call didn't return the instance list
1275
    @type ghost: boolean
1276
    @ivar ghost: whether this is a known node or not (config)
1277
    @type os_fail: boolean
1278
    @ivar os_fail: whether the RPC call didn't return valid OS data
1279
    @type oslist: list
1280
    @ivar oslist: list of OSes as diagnosed by DiagnoseOS
1281
    @type vm_capable: boolean
1282
    @ivar vm_capable: whether the node can host instances
1283

1284
    """
1285
    def __init__(self, offline=False, name=None, vm_capable=True):
1286
      self.name = name
1287
      self.volumes = {}
1288
      self.instances = []
1289
      self.pinst = []
1290
      self.sinst = []
1291
      self.sbp = {}
1292
      self.mfree = 0
1293
      self.dfree = 0
1294
      self.offline = offline
1295
      self.vm_capable = vm_capable
1296
      self.rpc_fail = False
1297
      self.lvm_fail = False
1298
      self.hyp_fail = False
1299
      self.ghost = False
1300
      self.os_fail = False
1301
      self.oslist = {}
1302

    
1303
  def ExpandNames(self):
1304
    self.needed_locks = {
1305
      locking.LEVEL_NODE: locking.ALL_SET,
1306
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1307
    }
1308
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1309

    
1310
  def _Error(self, ecode, item, msg, *args, **kwargs):
1311
    """Format an error message.
1312

1313
    Based on the opcode's error_codes parameter, either format a
1314
    parseable error code, or a simpler error string.
1315

1316
    This must be called only from Exec and functions called from Exec.
1317

1318
    """
1319
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1320
    itype, etxt = ecode
1321
    # first complete the msg
1322
    if args:
1323
      msg = msg % args
1324
    # then format the whole message
1325
    if self.op.error_codes:
1326
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1327
    else:
1328
      if item:
1329
        item = " " + item
1330
      else:
1331
        item = ""
1332
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1333
    # and finally report it via the feedback_fn
1334
    self._feedback_fn("  - %s" % msg)
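
  # Example (illustrative): for ecode = self.ENODEVERSION, item = "node1" and
  # msg = "incompatible protocol versions", this emits either
  #
  #   "  - ERROR:ENODEVERSION:node:node1:incompatible protocol versions"
  #
  # when the opcode requests parseable error codes, or
  #
  #   "  - ERROR: node node1: incompatible protocol versions"
  #
  # otherwise.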
1335

    
1336
  def _ErrorIf(self, cond, *args, **kwargs):
1337
    """Log an error message if the passed condition is True.
1338

1339
    """
1340
    cond = bool(cond) or self.op.debug_simulate_errors
1341
    if cond:
1342
      self._Error(*args, **kwargs)
1343
    # mark the operation as failed only for ERROR-type messages, not warnings
1344
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1345
      self.bad = self.bad or cond
1346

    
1347
  def _VerifyNode(self, ninfo, nresult):
1348
    """Perform some basic validation on data returned from a node.
1349

1350
      - check the result data structure is well formed and has all the
1351
        mandatory fields
1352
      - check ganeti version
1353

1354
    @type ninfo: L{objects.Node}
1355
    @param ninfo: the node to check
1356
    @param nresult: the results from the node
1357
    @rtype: boolean
1358
    @return: whether overall this call was successful (and we can expect
1359
         reasonable values in the response)
1360

1361
    """
1362
    node = ninfo.name
1363
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1364

    
1365
    # main result, nresult should be a non-empty dict
1366
    test = not nresult or not isinstance(nresult, dict)
1367
    _ErrorIf(test, self.ENODERPC, node,
1368
                  "unable to verify node: no data returned")
1369
    if test:
1370
      return False
1371

    
1372
    # compares ganeti version
1373
    local_version = constants.PROTOCOL_VERSION
1374
    remote_version = nresult.get("version", None)
1375
    test = not (remote_version and
1376
                isinstance(remote_version, (list, tuple)) and
1377
                len(remote_version) == 2)
1378
    _ErrorIf(test, self.ENODERPC, node,
1379
             "connection to node returned invalid data")
1380
    if test:
1381
      return False
1382

    
1383
    test = local_version != remote_version[0]
1384
    _ErrorIf(test, self.ENODEVERSION, node,
1385
             "incompatible protocol versions: master %s,"
1386
             " node %s", local_version, remote_version[0])
1387
    if test:
1388
      return False
1389

    
1390
    # node seems compatible, we can actually try to look into its results
1391

    
1392
    # full package version
1393
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1394
                  self.ENODEVERSION, node,
1395
                  "software version mismatch: master %s, node %s",
1396
                  constants.RELEASE_VERSION, remote_version[1],
1397
                  code=self.ETYPE_WARNING)
1398

    
1399
    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1400
    if ninfo.vm_capable and isinstance(hyp_result, dict):
1401
      for hv_name, hv_result in hyp_result.iteritems():
1402
        test = hv_result is not None
1403
        _ErrorIf(test, self.ENODEHV, node,
1404
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1405

    
1406
    hvp_result = nresult.get(constants.NV_HVPARAMS, None)
1407
    if ninfo.vm_capable and isinstance(hvp_result, list):
1408
      for item, hv_name, hv_result in hvp_result:
1409
        _ErrorIf(True, self.ENODEHV, node,
1410
                 "hypervisor %s parameter verify failure (source %s): %s",
1411
                 hv_name, item, hv_result)
1412

    
1413
    test = nresult.get(constants.NV_NODESETUP,
1414
                           ["Missing NODESETUP results"])
1415
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1416
             "; ".join(test))
1417

    
1418
    return True
1419

    
1420
  def _VerifyNodeTime(self, ninfo, nresult,
1421
                      nvinfo_starttime, nvinfo_endtime):
1422
    """Check the node time.
1423

1424
    @type ninfo: L{objects.Node}
1425
    @param ninfo: the node to check
1426
    @param nresult: the remote results for the node
1427
    @param nvinfo_starttime: the start time of the RPC call
1428
    @param nvinfo_endtime: the end time of the RPC call
1429

1430
    """
1431
    node = ninfo.name
1432
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1433

    
1434
    ntime = nresult.get(constants.NV_TIME, None)
1435
    try:
1436
      ntime_merged = utils.MergeTime(ntime)
1437
    except (ValueError, TypeError):
1438
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
1439
      return
1440

    
1441
    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
1442
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
1443
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
1444
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
1445
    else:
1446
      ntime_diff = None
1447

    
1448
    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
1449
             "Node time diverges by at least %s from master node time",
1450
             ntime_diff)
1451

    
1452
  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
1453
    """Check the node time.
1454

1455
    @type ninfo: L{objects.Node}
1456
    @param ninfo: the node to check
1457
    @param nresult: the remote results for the node
1458
    @param vg_name: the configured VG name
1459

1460
    """
1461
    if vg_name is None:
1462
      return
1463

    
1464
    node = ninfo.name
1465
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1466

    
1467
    # checks vg existence and size > 20G
1468
    vglist = nresult.get(constants.NV_VGLIST, None)
1469
    test = not vglist
1470
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1471
    if not test:
1472
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1473
                                            constants.MIN_VG_SIZE)
1474
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1475

    
1476
    # check pv names
1477
    pvlist = nresult.get(constants.NV_PVLIST, None)
1478
    test = pvlist is None
1479
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
1480
    if not test:
1481
      # check that ':' is not present in PV names, since it's a
1482
      # special character for lvcreate (denotes the range of PEs to
1483
      # use on the PV)
1484
      for _, pvname, owner_vg in pvlist:
1485
        test = ":" in pvname
1486
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
1487
                 " '%s' of VG '%s'", pvname, owner_vg)
1488

    
1489
  def _VerifyNodeNetwork(self, ninfo, nresult):
    """Check the node network connectivity.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    test = constants.NV_NODELIST not in nresult
    _ErrorIf(test, self.ENODESSH, node,
             "node hasn't returned node ssh connectivity data")
    if not test:
      if nresult[constants.NV_NODELIST]:
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
          _ErrorIf(True, self.ENODESSH, node,
                   "ssh communication with node '%s': %s", a_node, a_msg)

    test = constants.NV_NODENETTEST not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node tcp connectivity data")
    if not test:
      if nresult[constants.NV_NODENETTEST]:
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
        for anode in nlist:
          _ErrorIf(True, self.ENODENET, node,
                   "tcp communication with node '%s': %s",
                   anode, nresult[constants.NV_NODENETTEST][anode])

    test = constants.NV_MASTERIP not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node master IP reachability data")
    if not test:
      if not nresult[constants.NV_MASTERIP]:
        if node == self.master_node:
          msg = "the master node cannot reach the master IP (not configured?)"
        else:
          msg = "cannot reach the master IP"
        _ErrorIf(True, self.ENODENET, node, msg)

  def _VerifyInstance(self, instance, instanceconfig, node_image,
                      diskstatus):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      n_img = node_image[node]
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # ignore missing volumes on offline or broken nodes
        continue
      for volume in node_vol_should[node]:
        test = volume not in n_img.volumes
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
                 "volume %s missing on node %s", volume, node)

    if instanceconfig.admin_up:
      pri_img = node_image[node_current]
      test = instance not in pri_img.instances and not pri_img.offline
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
               "instance not running on its primary node %s",
               node_current)

    for node, n_img in node_image.items():
      if node != node_current:
        test = instance in n_img.instances
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
                 "instance should not run on node %s", node)

    diskdata = [(nname, success, status, idx)
                for (nname, disks) in diskstatus.items()
                for idx, (success, status) in enumerate(disks)]

    for nname, success, bdev_status, idx in diskdata:
      # the 'ghost node' construction in Exec() ensures that we have a
      # node here
      snode = node_image[nname]
      bad_snode = snode.ghost or snode.offline
      _ErrorIf(instanceconfig.admin_up and not success and not bad_snode,
               self.EINSTANCEFAULTYDISK, instance,
               "couldn't retrieve status for disk/%s on %s: %s",
               idx, nname, bdev_status)
      _ErrorIf((instanceconfig.admin_up and success and
                bdev_status.ldisk_status == constants.LDS_FAULTY),
               self.EINSTANCEFAULTYDISK, instance,
               "disk/%s on %s is faulty", idx, nname)

  def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    @type reserved: L{ganeti.utils.FieldSet}
    @param reserved: a FieldSet of reserved volume names

    """
    for node, n_img in node_image.items():
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # skip non-healthy nodes
        continue
      for volume in n_img.volumes:
        test = ((node not in node_vol_should or
                volume not in node_vol_should[node]) and
                not reserved.Matches(volume))
        self._ErrorIf(test, self.ENODEORPHANLV, node,
                      "volume %s is unknown", volume)

  def _VerifyOrphanInstances(self, instancelist, node_image):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    for node, n_img in node_image.items():
      for o_inst in n_img.instances:
        test = o_inst not in instancelist
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
                      "instance %s on node %s should not exist", o_inst, node)

  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the
    instances it was primary for.

    """
    cluster_info = self.cfg.GetClusterInfo()
    for node, n_img in node_image.items():
      # This code checks that every node which is now listed as
      # secondary has enough memory to host all instances it is
      # supposed to should a single other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well
      # as up ones, considering that even if they're down someone
      # might want to start them even in the event of a node failure.
      if n_img.offline:
        # we're skipping offline nodes from the N+1 warning, since
        # most likely we don't have good memory information from them;
        # we already list instances living on such nodes, and that's
        # enough warning
        continue
      for prinode, instances in n_img.sbp.items():
        needed_mem = 0
        for instance in instances:
          bep = cluster_info.FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        test = n_img.mfree < needed_mem
        self._ErrorIf(test, self.ENODEN1, node,
                      "not enough memory to accommodate instance failovers"
                      " should node %s fail", prinode)

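  # Worked example for _VerifyNPlusOneMemory above (hypothetical values): if
  # this node is secondary for two auto-balanced instances whose primary is
  # node "nodeA", with BE_MEMORY of 2048 and 4096, then needed_mem for the
  # "nodeA" entry is 6144; ENODEN1 is reported when the node's mfree is below
  # that, i.e. it could not host both instances should "nodeA" fail.
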
  def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
                       master_files):
    """Verifies and computes the node required file checksums.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param master_files: list of files that only masters should have

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    remote_cksum = nresult.get(constants.NV_FILELIST, None)
    test = not isinstance(remote_cksum, dict)
    _ErrorIf(test, self.ENODEFILECHECK, node,
             "node hasn't returned file checksum data")
    if test:
      return

    for file_name in file_list:
      node_is_mc = ninfo.master_candidate
      must_have = (file_name not in master_files) or node_is_mc
      # missing
      test1 = file_name not in remote_cksum
      # invalid checksum
      test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
      # existing and good
      test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
      _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
               "file '%s' missing", file_name)
      _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
               "file '%s' has wrong checksum", file_name)
      # not candidate and this is not a must-have file
      _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
               "file '%s' should not exist on non master"
               " candidates (and the file is outdated)", file_name)
      # all good, except non-master/non-must have combination
      _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
               "file '%s' should not exist"
               " on non master candidates", file_name)

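  # Note on _VerifyNodeFiles above: for every required file exactly one of
  # test1 (missing), test2 (present but wrong checksum) or test3 (present and
  # matching) holds; missing or mismatching files are only errors on nodes
  # that must have them, while copies found on other nodes are flagged as
  # files that should not exist there.
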
  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
                      drbd_map):
    """Verifies the node DRBD status.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param instanceinfo: the dict of instances
    @param drbd_helper: the configured DRBD usermode helper
    @param drbd_map: the DRBD map as returned by
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    if drbd_helper:
      helper_result = nresult.get(constants.NV_DRBDHELPER, None)
      test = (helper_result == None)
      _ErrorIf(test, self.ENODEDRBDHELPER, node,
               "no drbd usermode helper returned")
      if helper_result:
        status, payload = helper_result
        test = not status
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
                 "drbd usermode helper check unsuccessful: %s", payload)
        test = status and (payload != drbd_helper)
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
                 "wrong drbd usermode helper: %s", payload)

    # compute the DRBD minors
    node_drbd = {}
    for minor, instance in drbd_map[node].items():
      test = instance not in instanceinfo
      _ErrorIf(test, self.ECLUSTERCFG, None,
               "ghost instance '%s' in temporary DRBD map", instance)
        # ghost instance should not be running, but otherwise we
        # don't give double warnings (both ghost instance and
        # unallocated minor in use)
      if test:
        node_drbd[minor] = (instance, False)
      else:
        instance = instanceinfo[instance]
        node_drbd[minor] = (instance.name, instance.admin_up)

    # and now check them
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
    test = not isinstance(used_minors, (tuple, list))
    _ErrorIf(test, self.ENODEDRBD, node,
             "cannot parse drbd status file: %s", str(used_minors))
    if test:
      # we cannot check drbd status
      return

    for minor, (iname, must_exist) in node_drbd.items():
      test = minor not in used_minors and must_exist
      _ErrorIf(test, self.ENODEDRBD, node,
               "drbd minor %d of instance %s is not active", minor, iname)
    for minor in used_minors:
      test = minor not in node_drbd
      _ErrorIf(test, self.ENODEDRBD, node,
               "unallocated drbd minor %d is in use", minor)

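  # Note on _VerifyNodeDrbd above: node_drbd maps every minor expected on this
  # node to an (instance_name, must_be_active) pair, e.g. (hypothetical)
  # {0: ("inst1.example.com", True)}; the two final loops then flag expected
  # minors that are not in use although the instance is up, and in-use minors
  # that are not allocated in the configuration.
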
  def _UpdateNodeOS(self, ninfo, nresult, nimg):
    """Builds the node OS structures.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    remote_os = nresult.get(constants.NV_OSLIST, None)
    test = (not isinstance(remote_os, list) or
            not compat.all(isinstance(v, list) and len(v) == 7
                           for v in remote_os))

    _ErrorIf(test, self.ENODEOS, node,
             "node hasn't returned valid OS data")

    nimg.os_fail = test

    if test:
      return

    os_dict = {}

    for (name, os_path, status, diagnose,
         variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:

      if name not in os_dict:
        os_dict[name] = []

      # parameters is a list of lists instead of list of tuples due to
      # JSON lacking a real tuple type, fix it:
      parameters = [tuple(v) for v in parameters]
      os_dict[name].append((os_path, status, diagnose,
                            set(variants), set(parameters), set(api_ver)))

    nimg.oslist = os_dict

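  # Note on _UpdateNodeOS above: nimg.oslist maps each OS name to a list of
  # (path, status, diagnose, variants, parameters, api_versions) tuples, one
  # per location the OS was found in; a hypothetical entry could look like
  # {"debootstrap": [("/srv/ganeti/os/debootstrap", True, "",
  #                   set(["default"]), set(), set([20]))]}.
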
  def _VerifyNodeOS(self, ninfo, nimg, base):
    """Verifies the node OS list.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nimg: the node image object
    @param base: the 'template' node we match against (e.g. from the master)

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"

    for os_name, os_data in nimg.oslist.items():
      assert os_data, "Empty OS status for OS %s?!" % os_name
      f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
      _ErrorIf(not f_status, self.ENODEOS, node,
               "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
      _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
               "OS '%s' has multiple entries (first one shadows the rest): %s",
               os_name, utils.CommaJoin([v[0] for v in os_data]))
      # this will be caught in the backend too
      _ErrorIf(compat.any(v >= constants.OS_API_V15 for v in f_api)
               and not f_var, self.ENODEOS, node,
               "OS %s with API at least %d does not declare any variant",
               os_name, constants.OS_API_V15)
      # comparisons with the 'base' image
      test = os_name not in base.oslist
      _ErrorIf(test, self.ENODEOS, node,
               "Extra OS %s not present on reference node (%s)",
               os_name, base.name)
      if test:
        continue
      assert base.oslist[os_name], "Base node has empty OS status?"
      _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
      if not b_status:
        # base OS is invalid, skipping
        continue
      for kind, a, b in [("API version", f_api, b_api),
                         ("variants list", f_var, b_var),
                         ("parameters", f_param, b_param)]:
        _ErrorIf(a != b, self.ENODEOS, node,
                 "OS %s %s differs from reference node %s: %s vs. %s",
                 kind, os_name, base.name,
                 utils.CommaJoin(a), utils.CommaJoin(b))

    # check any missing OSes
    missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
    _ErrorIf(missing, self.ENODEOS, node,
             "OSes present on reference node %s but missing on this node: %s",
             base.name, utils.CommaJoin(missing))

  def _VerifyOob(self, ninfo, nresult):
    """Verifies out of band functionality of a node.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node

    """
    node = ninfo.name
    # We just have to verify the paths on master and/or master candidates
    # as the oob helper is invoked on the master
    if ((ninfo.master_candidate or ninfo.master_capable) and
        constants.NV_OOB_PATHS in nresult):
      for path_result in nresult[constants.NV_OOB_PATHS]:
        self._ErrorIf(path_result, self.ENODEOOBPATH, node, path_result)

  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
    """Verifies and updates the node volume data.

    This function will update a L{NodeImage}'s internal structures
    with data from the remote call.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object
    @param vg_name: the configured VG name

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    nimg.lvm_fail = True
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
    if vg_name is None:
      pass
    elif isinstance(lvdata, basestring):
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
               utils.SafeEncode(lvdata))
    elif not isinstance(lvdata, dict):
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
    else:
      nimg.volumes = lvdata
      nimg.lvm_fail = False

  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
    """Verifies and updates the node instance list.

    If the listing was successful, then updates this node's instance
    list. Otherwise, it marks the RPC call as failed for the instance
    list key.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object

    """
    idata = nresult.get(constants.NV_INSTANCELIST, None)
    test = not isinstance(idata, list)
    self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
                  " (instancelist): %s", utils.SafeEncode(str(idata)))
    if test:
      nimg.hyp_fail = True
    else:
      nimg.instances = idata

  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
    """Verifies and computes a node information map

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object
    @param vg_name: the configured VG name

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # try to read free memory (from the hypervisor)
    hv_info = nresult.get(constants.NV_HVINFO, None)
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
    _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
    if not test:
      try:
        nimg.mfree = int(hv_info["memory_free"])
      except (ValueError, TypeError):
        _ErrorIf(True, self.ENODERPC, node,
                 "node returned invalid nodeinfo, check hypervisor")

    # FIXME: devise a free space model for file based instances as well
    if vg_name is not None:
      test = (constants.NV_VGLIST not in nresult or
              vg_name not in nresult[constants.NV_VGLIST])
      _ErrorIf(test, self.ENODELVM, node,
               "node didn't return data for the volume group '%s'"
               " - it is either missing or broken", vg_name)
      if not test:
        try:
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
        except (ValueError, TypeError):
          _ErrorIf(True, self.ENODERPC, node,
                   "node returned invalid LVM info, check LVM status")

  def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
    """Gets per-disk status information for all instances.

    @type nodelist: list of strings
    @param nodelist: Node names
    @type node_image: dict of (name, L{objects.Node})
    @param node_image: Node objects
    @type instanceinfo: dict of (name, L{objects.Instance})
    @param instanceinfo: Instance objects
    @rtype: {instance: {node: [(success, payload)]}}
    @return: a dictionary of per-instance dictionaries with nodes as
        keys and disk information as values; the disk information is a
        list of tuples (success, payload)

    """
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    node_disks = {}
    node_disks_devonly = {}
    diskless_instances = set()
    diskless = constants.DT_DISKLESS

    for nname in nodelist:
      node_instances = list(itertools.chain(node_image[nname].pinst,
                                            node_image[nname].sinst))
      diskless_instances.update(inst for inst in node_instances
                                if instanceinfo[inst].disk_template == diskless)
      disks = [(inst, disk)
               for inst in node_instances
               for disk in instanceinfo[inst].disks]

      if not disks:
        # No need to collect data
        continue

      node_disks[nname] = disks

      # Creating copies as SetDiskID below will modify the objects and that can
      # lead to incorrect data returned from nodes
      devonly = [dev.Copy() for (_, dev) in disks]

      for dev in devonly:
        self.cfg.SetDiskID(dev, nname)

      node_disks_devonly[nname] = devonly

    assert len(node_disks) == len(node_disks_devonly)

    # Collect data from all nodes with disks
    result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
                                                          node_disks_devonly)

    assert len(result) == len(node_disks)

    instdisk = {}

    for (nname, nres) in result.items():
      disks = node_disks[nname]

      if nres.offline:
        # No data from this node
        data = len(disks) * [(False, "node offline")]
      else:
        msg = nres.fail_msg
        _ErrorIf(msg, self.ENODERPC, nname,
                 "while getting disk information: %s", msg)
        if msg:
          # No data from this node
          data = len(disks) * [(False, msg)]
        else:
          data = []
          for idx, i in enumerate(nres.payload):
            if isinstance(i, (tuple, list)) and len(i) == 2:
              data.append(i)
            else:
              logging.warning("Invalid result from node %s, entry %d: %s",
                              nname, idx, i)
              data.append((False, "Invalid result from the remote node"))

      for ((inst, _), status) in zip(disks, data):
        instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)

    # Add empty entries for diskless instances.
    for inst in diskless_instances:
      assert inst not in instdisk
      instdisk[inst] = {}

    assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
                      len(nnames) <= len(instanceinfo[inst].all_nodes) and
                      compat.all(isinstance(s, (tuple, list)) and
                                 len(s) == 2 for s in statuses)
                      for inst, nnames in instdisk.items()
                      for nname, statuses in nnames.items())
    assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"

    return instdisk

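  # Note on _CollectDiskInfo above: with hypothetical names, the returned
  # structure looks like
  #   {"inst1": {"node1": [(True, status0), (True, status1)],
  #              "node2": [(False, "node offline")]},
  #    "inst-diskless": {}}
  # i.e. one (success, payload) entry per disk and per node, and an empty
  # dict for diskless instances.
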
  def _VerifyHVP(self, hvp_data):
    """Verifies locally the syntax of the hypervisor parameters.

    """
    for item, hv_name, hv_params in hvp_data:
      msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
             (hv_name, item))
      try:
        hv_class = hypervisor.GetHypervisor(hv_name)
        utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
        hv_class.CheckParameterSyntax(hv_params)
      except errors.GenericError, err:
        self._ErrorIf(True, self.ECLUSTERCFG, None, msg % str(err))

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase, and their failure makes
    the output be logged in the verify output and the verification to fail.

    """
    all_nodes = self.cfg.GetNodeList()
    env = {
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
      }
    for node in self.cfg.GetAllNodesInfo().values():
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())

    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    # This method has too many local variables. pylint: disable-msg=R0914
    self.bad = False
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
    verbose = self.op.verbose
    self._feedback_fn = feedback_fn
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)

    # Check the cluster certificates
    for cert_filename in constants.ALL_CERT_FILES:
      (errcode, msg) = _VerifyCertificate(cert_filename)
      _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)

    vg_name = self.cfg.GetVGName()
    drbd_helper = self.cfg.GetDRBDHelper()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    cluster = self.cfg.GetClusterInfo()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    nodeinfo_byname = dict(zip(nodelist, nodeinfo))
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
                        for iname in instancelist)
    groupinfo = self.cfg.GetAllNodeGroupsInfo()
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    n_offline = 0 # Count of offline nodes
    n_drained = 0 # Count of nodes being drained
    node_vol_should = {}

    # FIXME: verify OS list
    # do local checksums
    master_files = [constants.CLUSTER_CONF_FILE]
    master_node = self.master_node = self.cfg.GetMasterNode()
    master_ip = self.cfg.GetMasterIP()

    file_names = ssconf.SimpleStore().GetFileList()
    file_names.extend(constants.ALL_CERT_FILES)
    file_names.extend(master_files)
    if cluster.modify_etc_hosts:
      file_names.append(constants.ETC_HOSTS)

    local_checksums = utils.FingerprintFiles(file_names)

    # Compute the set of hypervisor parameters
    hvp_data = []
    for hv_name in hypervisors:
      hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
    for os_name, os_hvp in cluster.os_hvp.items():
      for hv_name, hv_params in os_hvp.items():
        if not hv_params:
          continue
        full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
        hvp_data.append(("os %s" % os_name, hv_name, full_params))
    # TODO: collapse identical parameter values in a single one
    for instance in instanceinfo.values():
      if not instance.hvparams:
        continue
      hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
                       cluster.FillHV(instance)))
    # and verify them locally
    self._VerifyHVP(hvp_data)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    node_verify_param = {
      constants.NV_FILELIST: file_names,
      constants.NV_NODELIST: [node.name for node in nodeinfo
                              if not node.offline],
      constants.NV_HYPERVISOR: hypervisors,
      constants.NV_HVPARAMS: hvp_data,
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
                                  node.secondary_ip) for node in nodeinfo
                                 if not node.offline],
      constants.NV_INSTANCELIST: hypervisors,
      constants.NV_VERSION: None,
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
      constants.NV_NODESETUP: None,
      constants.NV_TIME: None,
      constants.NV_MASTERIP: (master_node, master_ip),
      constants.NV_OSLIST: None,
      constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
      }

    if vg_name is not None:
      node_verify_param[constants.NV_VGLIST] = None
      node_verify_param[constants.NV_LVLIST] = vg_name
      node_verify_param[constants.NV_PVLIST] = [vg_name]
      node_verify_param[constants.NV_DRBDLIST] = None

    if drbd_helper:
      node_verify_param[constants.NV_DRBDHELPER] = drbd_helper

    # Build our expected cluster state
    node_image = dict((node.name, self.NodeImage(offline=node.offline,
                                                 name=node.name,
                                                 vm_capable=node.vm_capable))
                      for node in nodeinfo)

    # Gather OOB paths
    oob_paths = []
    for node in nodeinfo:
      path = _SupportsOob(self.cfg, node)
      if path and path not in oob_paths:
        oob_paths.append(path)

    if oob_paths:
      node_verify_param[constants.NV_OOB_PATHS] = oob_paths

    for instance in instancelist:
      inst_config = instanceinfo[instance]

      for nname in inst_config.all_nodes:
        if nname not in node_image:
          # ghost node
          gnode = self.NodeImage(name=nname)
          gnode.ghost = True
          node_image[nname] = gnode

      inst_config.MapLVsByNode(node_vol_should)

      pnode = inst_config.primary_node
      node_image[pnode].pinst.append(instance)

      for snode in inst_config.secondary_nodes:
        nimg = node_image[snode]
        nimg.sinst.append(instance)
        if pnode not in nimg.sbp:
          nimg.sbp[pnode] = []
        nimg.sbp[pnode].append(instance)

    # At this point, we have the in-memory data structures complete,
    # except for the runtime information, which we'll gather next

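    # A note on the node_image entries built above: for each node, pinst holds
    # the instances having it as primary, sinst those having it as secondary,
    # and sbp groups the secondary instances by their primary node (which is
    # what the N+1 memory check consumes); ghost entries stand in for nodes
    # referenced by instances but absent from the configuration.
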
    # Due to the way our RPC system works, exact response times cannot be
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
    # time before and after executing the request, we can at least have a time
    # window.
    nvinfo_starttime = time.time()
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())
    nvinfo_endtime = time.time()

    all_drbd_map = self.cfg.ComputeDRBDMap()

    feedback_fn("* Gathering disk information (%s nodes)" % len(nodelist))
    instdisk = self._CollectDiskInfo(nodelist, node_image, instanceinfo)

    feedback_fn("* Verifying node status")

    refos_img = None

    for node_i in nodeinfo:
      node = node_i.name
      nimg = node_image[node]

      if node_i.offline:
        if verbose:
          feedback_fn("* Skipping offline node %s" % (node,))
        n_offline += 1
        continue

      if node == master_node:
        ntype = "master"
      elif node_i.master_candidate:
        ntype = "master candidate"
      elif node_i.drained:
        ntype = "drained"
        n_drained += 1
      else:
        ntype = "regular"
      if verbose:
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))

      msg = all_nvinfo[node].fail_msg
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
      if msg:
        nimg.rpc_fail = True
        continue

      nresult = all_nvinfo[node].payload

      nimg.call_ok = self._VerifyNode(node_i, nresult)
      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
      self._VerifyNodeNetwork(node_i, nresult)
      self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
                            master_files)

      self._VerifyOob(node_i, nresult)

      if nimg.vm_capable:
        self._VerifyNodeLVM(node_i, nresult, vg_name)
        self._VerifyNodeDrbd(node_i, nresult, instanceinfo, drbd_helper,
                             all_drbd_map)

        self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
        self._UpdateNodeInstances(node_i, nresult, nimg)
        self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
        self._UpdateNodeOS(node_i, nresult, nimg)
        if not nimg.os_fail:
          if refos_img is None:
            refos_img = nimg
          self._VerifyNodeOS(node_i, nimg, refos_img)

    feedback_fn("* Verifying instance status")
    for instance in instancelist:
      if verbose:
        feedback_fn("* Verifying instance %s" % instance)
      inst_config = instanceinfo[instance]
      self._VerifyInstance(instance, inst_config, node_image,
                           instdisk[instance])
      inst_nodes_offline = []

      pnode = inst_config.primary_node
      pnode_img = node_image[pnode]
      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
               self.ENODERPC, pnode, "instance %s, connection to"
               " primary node failed", instance)

      _ErrorIf(pnode_img.offline, self.EINSTANCEBADNODE, instance,
               "instance lives on offline node %s", inst_config.primary_node)

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if not inst_config.secondary_nodes:
        i_non_redundant.append(instance)

      _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
               instance, "instance has multiple secondary nodes: %s",
               utils.CommaJoin(inst_config.secondary_nodes),
               code=self.ETYPE_WARNING)

      if inst_config.disk_template in constants.DTS_INT_MIRROR:
        pnode = inst_config.primary_node
        instance_nodes = utils.NiceSort(inst_config.all_nodes)
        instance_groups = {}

        for node in instance_nodes:
          instance_groups.setdefault(nodeinfo_byname[node].group,
                                     []).append(node)

        pretty_list = [
          "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
          # Sort so that we always list the primary node first.
          for group, nodes in sorted(instance_groups.items(),
                                     key=lambda (_, nodes): pnode in nodes,
                                     reverse=True)]

        self._ErrorIf(len(instance_groups) > 1, self.EINSTANCESPLITGROUPS,
                      instance, "instance has primary and secondary nodes in"
                      " different groups: %s", utils.CommaJoin(pretty_list),
                      code=self.ETYPE_WARNING)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        s_img = node_image[snode]
        _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
                 "instance %s, connection to secondary node failed", instance)

        if s_img.offline:
          inst_nodes_offline.append(snode)

      # warn that the instance lives on offline nodes
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
               "instance has offline secondary node(s) %s",
               utils.CommaJoin(inst_nodes_offline))
      # ... or ghost/non-vm_capable nodes
      for node in inst_config.all_nodes:
        _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
                 "instance lives on ghost node %s", node)
        _ErrorIf(not node_image[node].vm_capable, self.EINSTANCEBADNODE,
                 instance, "instance lives on non-vm_capable node %s", node)

    feedback_fn("* Verifying orphan volumes")
    reserved = utils.FieldSet(*cluster.reserved_lvs)
    self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)

    feedback_fn("* Verifying orphan instances")
    self._VerifyOrphanInstances(instancelist, node_image)

    if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
      feedback_fn("* Verifying N+1 Memory redundancy")
      self._VerifyNPlusOneMemory(node_image, instanceinfo)

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    if n_offline:
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)

    if n_drained:
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)

    return not self.bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result.

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      feedback_fn("* Hooks Results")
      assert hooks_results, "invalid result from hooks"

      for node_name in hooks_results:
        res = hooks_results[node_name]
        msg = res.fail_msg
        test = msg and not res.offline
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
                      "Communication failure in hooks execution: %s", msg)
        if res.offline or msg:
          # No need to investigate payload if node is offline or gave an error.
          # override manually lu_result here as _ErrorIf only
          # overrides self.bad
          lu_result = 1
          continue
        for script, hkr, output in res.payload:
          test = hkr == constants.HKR_FAIL
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
                        "Script %s failed, output:", script)
          if test:
            output = self._HOOKS_INDENT_RE.sub('      ', output)
            feedback_fn("%s" % output)
            lu_result = 0

      return lu_result


class LUClusterVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes)

    """
    result = res_nodes, res_instances, res_missing = {}, [], {}

    nodes = utils.NiceSort(self.cfg.GetVmCapableNodeList())
    instances = self.cfg.GetAllInstancesInfo().values()

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if not inst.admin_up:
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_lv_list(nodes, [])
    for node, node_res in node_lvs.items():
      if node_res.offline:
        continue
      msg = node_res.fail_msg
      if msg:
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
        res_nodes[node] = msg
        continue

      lvs = node_res.payload
      for lv_name, (_, _, lv_online) in lvs.items():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result


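# Note on LUClusterVerifyDisks.Exec above: a returned tuple could look like
# ({"node3": "rpc error"}, ["inst2"], {"inst5": [("node1", "xenvg/disk0")]})
# (hypothetical names): per-node enumeration errors, instances needing
# activate-disks because an LV is offline, and instances with LVs that are
# missing entirely.

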
class LUClusterRepairDiskSizes(NoHooksLU):
  """Verifies the cluster disks sizes.

  """
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = _ExpandInstanceName(self.cfg, name)
        self.wanted_names.append(full_name)
      self.needed_locks = {
        locking.LEVEL_NODE: [],
        locking.LEVEL_INSTANCE: self.wanted_names,
        }
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    else:
      self.wanted_names = None
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: locking.ALL_SET,
        }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
      self._LockInstancesNodes(primary_only=True)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]

  def _EnsureChildSizes(self, disk):
    """Ensure children of the disk have the needed disk size.

    This is valid mainly for DRBD8 and fixes an issue where the
    children have smaller disk size.

    @param disk: an L{ganeti.objects.Disk} object

    """
    if disk.dev_type == constants.LD_DRBD8:
      assert disk.children, "Empty children for DRBD8?"
      fchild = disk.children[0]
      mismatch = fchild.size < disk.size
      if mismatch:
        self.LogInfo("Child disk has size %d, parent %d, fixing",
                     fchild.size, disk.size)
        fchild.size = disk.size

      # and we recurse on this child only, not on the metadev
      return self._EnsureChildSizes(fchild) or mismatch
    else:
      return False

  def Exec(self, feedback_fn):
    """Verify the size of cluster disks.

    """
    # TODO: check child disks too
    # TODO: check differences in size between primary/secondary nodes
    per_node_disks = {}
    for instance in self.wanted_instances:
      pnode = instance.primary_node
      if pnode not in per_node_disks:
        per_node_disks[pnode] = []
      for idx, disk in enumerate(instance.disks):
        per_node_disks[pnode].append((instance, idx, disk))

    changed = []
    for node, dskl in per_node_disks.items():
      newl = [v[2].Copy() for v in dskl]
      for dsk in newl:
        self.cfg.SetDiskID(dsk, node)
      result = self.rpc.call_blockdev_getsize(node, newl)
      if result.fail_msg:
        self.LogWarning("Failure in blockdev_getsize call to node"
                        " %s, ignoring", node)
        continue
      if len(result.payload) != len(dskl):
        logging.warning("Invalid result from node %s: len(dskl)=%d,"
                        " result.payload=%s", node, len(dskl), result.payload)
        self.LogWarning("Invalid result from node %s, ignoring node results",
                        node)
        continue
      for ((instance, idx, disk), size) in zip(dskl, result.payload):
        if size is None:
          self.LogWarning("Disk %d of instance %s did not return size"
                          " information, ignoring", idx, instance.name)
          continue
        if not isinstance(size, (int, long)):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " size information, ignoring", idx, instance.name)
          continue
        size = size >> 20
        if size != disk.size:
          self.LogInfo("Disk %d of instance %s has mismatched size,"
                       " correcting: recorded %d, actual %d", idx,
                       instance.name, disk.size, size)
          disk.size = size
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, size))
        if self._EnsureChildSizes(disk):
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, disk.size))
    return changed


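# Note on LUClusterRepairDiskSizes.Exec above: sizes returned by the
# blockdev_getsize RPC are shifted right by 20 bits before being compared to
# disk.size, i.e. (assuming the payload is reported in bytes) converted to
# MiB, the unit in which the configuration stores disk sizes.

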
class LUClusterRename(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    all_nodes = self.cfg.GetNodeList()
    return env, [mn], all_nodes

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = netutils.GetHostname(name=self.op.name,
                                    family=self.cfg.GetPrimaryIPFamily())

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed",
                                 errors.ECODE_INVAL)
    if new_ip != old_ip:
      if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network" %
                                   new_ip, errors.ECODE_NOTUNIQUE)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster, feedback_fn)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetOnlineNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
    finally:
      result = self.rpc.call_node_start_master(master, False, False)
      msg = result.fail_msg
      if msg:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually: %s", msg)

    return clustername


class LUClusterSetParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters

    """
    if self.op.uid_pool:
      uidpool.CheckUidPool(self.op.uid_pool)

    if self.op.add_uids:
      uidpool.CheckUidPool(self.op.add_uids)

    if self.op.remove_uids:
      uidpool.CheckUidPool(self.op.remove_uids)

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    if self.op.vg_name is not None and not self.op.vg_name:
      if self.cfg.HasAnyDiskOfType(constants.LD_LV):
        raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
                                   " instances exist", errors.ECODE_INVAL)

    if self.op.drbd_helper is not None and not self.op.drbd_helper:
      if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
        raise errors.OpPrereqError("Cannot disable drbd helper while"
                                   " drbd-based instances exist",
                                   errors.ECODE_INVAL)

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        msg = vglist[node].fail_msg
        if msg:
          # ignoring down node
          self.LogWarning("Error while gathering data on node %s"
                          " (ignoring node): %s", node, msg)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus), errors.ECODE_ENVIRON)

    if self.op.drbd_helper:
      # checks given drbd helper on all nodes
      helpers = self.rpc.call_drbd_helper(node_list)
      for node in node_list:
        ninfo = self.cfg.GetNodeInfo(node)
        if ninfo.offline:
          self.LogInfo("Not checking drbd helper on offline node %s", node)
          continue
        msg = helpers[node].fail_msg
        if msg:
          raise errors.OpPrereqError("Error checking drbd helper on node"
                                     " '%s': %s" % (node, msg),
                                     errors.ECODE_ENVIRON)
        node_helper = helpers[node].payload
        if node_helper != self.op.drbd_helper:
          raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
                                     (node, node_helper), errors.ECODE_ENVIRON)

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate params changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      self.new_beparams = cluster.SimpleFillBE(self.op.beparams)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)

    if self.op.nicparams:
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
      self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
      nic_errors = []

      # check all instances for consistency
      for instance in self.cfg.GetAllInstancesInfo().values():
        for nic_idx, nic in enumerate(instance.nics):
          params_copy = copy.deepcopy(nic.nicparams)
          params_filled = objects.FillDict(self.new_nicparams, params_copy)

          # check parameter syntax
          try:
            objects.NIC.CheckParameterSyntax(params_filled)
          except errors.ConfigurationError, err:
            nic_errors.append("Instance %s, nic/%d: %s" %
                              (instance.name, nic_idx, err))

          # if we're moving instances to routed, check that they have an ip
          target_mode = params_filled[constants.NIC_MODE]
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
            nic_errors.append("Instance %s, nic/%d: routed NIC with no ip" %
                              (instance.name, nic_idx))
      if nic_errors:
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
                                   "\n".join(nic_errors))

    # hypervisor list/parameters
    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    # os hypervisor parameters
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
    if self.op.os_hvp:
      for os_name, hvs in self.op.os_hvp.items():
        if os_name not in self.new_os_hvp:
          self.new_os_hvp[os_name] = hvs
        else:
          for hv_name, hv_dict in hvs.items():
            if hv_name not in self.new_os_hvp[os_name]:
              self.new_os_hvp[os_name][hv_name] = hv_dict
            else:
              self.new_os_hvp[os_name][hv_name].update(hv_dict)

    # os parameters
    self.new_osp = objects.FillDict(cluster.osparams, {})
    if self.op.osparams:
      for os_name, osp in self.op.osparams.items():
        if os_name not in self.new_osp:
          self.new_osp[os_name] = {}

        self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
                                                  use_none=True)

        if not self.new_osp[os_name]:
          # we removed all parameters
          del self.new_osp[os_name]
        else:
          # check the parameter validity (remote check)
          _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
                         os_name, self.new_osp[os_name])

    # changes to the hypervisor list
    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
      for hv in self.hv_list:
        # if the hypervisor doesn't already exist in the cluster
        # hvparams, we initialize it to empty, and then (in both
        # cases) we make sure to fill the defaults, as we might not
        # have a complete defaults list if the hypervisor wasn't
        # enabled before
        if hv not in new_hvp:
          new_hvp[hv] = {}
        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

    if self.op.os_hvp:
      # no need to check any newly-enabled hypervisors, since the
      # defaults have already been checked in the above code-block
2910
      for os_name, os_hvp in self.new_os_hvp.items():
2911
        for hv_name, hv_params in os_hvp.items():
2912
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2913
          # we need to fill in the new os_hvp on top of the actual hv_p
2914
          cluster_defaults = self.new_hvparams.get(hv_name, {})
2915
          new_osp = objects.FillDict(cluster_defaults, hv_params)
2916
          hv_class = hypervisor.GetHypervisor(hv_name)
2917
          hv_class.CheckParameterSyntax(new_osp)
2918
          _CheckHVParams(self, node_list, hv_name, new_osp)
2919

    
2920
    if self.op.default_iallocator:
2921
      alloc_script = utils.FindFile(self.op.default_iallocator,
2922
                                    constants.IALLOCATOR_SEARCH_PATH,
2923
                                    os.path.isfile)
2924
      if alloc_script is None:
2925
        raise errors.OpPrereqError("Invalid default iallocator script '%s'"
2926
                                   " specified" % self.op.default_iallocator,
2927
                                   errors.ECODE_INVAL)
2928

    
2929
  def Exec(self, feedback_fn):
2930
    """Change the parameters of the cluster.
2931

2932
    """
2933
    if self.op.vg_name is not None:
2934
      new_volume = self.op.vg_name
2935
      if not new_volume:
2936
        new_volume = None
2937
      if new_volume != self.cfg.GetVGName():
2938
        self.cfg.SetVGName(new_volume)
2939
      else:
2940
        feedback_fn("Cluster LVM configuration already in desired"
2941
                    " state, not changing")
2942
    if self.op.drbd_helper is not None:
2943
      new_helper = self.op.drbd_helper
2944
      if not new_helper:
2945
        new_helper = None
2946
      if new_helper != self.cfg.GetDRBDHelper():
2947
        self.cfg.SetDRBDHelper(new_helper)
2948
      else:
2949
        feedback_fn("Cluster DRBD helper already in desired state,"
2950
                    " not changing")
2951
    if self.op.hvparams:
2952
      self.cluster.hvparams = self.new_hvparams
2953
    if self.op.os_hvp:
2954
      self.cluster.os_hvp = self.new_os_hvp
2955
    if self.op.enabled_hypervisors is not None:
2956
      self.cluster.hvparams = self.new_hvparams
2957
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
2958
    if self.op.beparams:
2959
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
2960
    if self.op.nicparams:
2961
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
2962
    if self.op.osparams:
2963
      self.cluster.osparams = self.new_osp
2964
    if self.op.ndparams:
2965
      self.cluster.ndparams = self.new_ndparams
2966

    
2967
    if self.op.candidate_pool_size is not None:
2968
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
2969
      # we need to update the pool size here, otherwise the save will fail
2970
      _AdjustCandidatePool(self, [])
2971

    
2972
    if self.op.maintain_node_health is not None:
2973
      self.cluster.maintain_node_health = self.op.maintain_node_health
2974

    
2975
    if self.op.prealloc_wipe_disks is not None:
2976
      self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
2977

    
2978
    if self.op.add_uids is not None:
2979
      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
2980

    
2981
    if self.op.remove_uids is not None:
2982
      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
2983

    
2984
    if self.op.uid_pool is not None:
2985
      self.cluster.uid_pool = self.op.uid_pool
2986

    
2987
    if self.op.default_iallocator is not None:
2988
      self.cluster.default_iallocator = self.op.default_iallocator
2989

    
2990
    if self.op.reserved_lvs is not None:
2991
      self.cluster.reserved_lvs = self.op.reserved_lvs
2992

    
2993
    def helper_os(aname, mods, desc):
2994
      desc += " OS list"
2995
      lst = getattr(self.cluster, aname)
2996
      for key, val in mods:
2997
        if key == constants.DDM_ADD:
2998
          if val in lst:
2999
            feedback_fn("OS %s already in %s, ignoring" % (val, desc))
3000
          else:
3001
            lst.append(val)
3002
        elif key == constants.DDM_REMOVE:
3003
          if val in lst:
3004
            lst.remove(val)
3005
          else:
3006
            feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
3007
        else:
3008
          raise errors.ProgrammerError("Invalid modification '%s'" % key)
3009

    
3010
    if self.op.hidden_os:
3011
      helper_os("hidden_os", self.op.hidden_os, "hidden")
3012

    
3013
    if self.op.blacklisted_os:
3014
      helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
3015

    
3016
    if self.op.master_netdev:
3017
      master = self.cfg.GetMasterNode()
3018
      feedback_fn("Shutting down master ip on the current netdev (%s)" %
3019
                  self.cluster.master_netdev)
3020
      result = self.rpc.call_node_stop_master(master, False)
3021
      result.Raise("Could not disable the master ip")
3022
      feedback_fn("Changing master_netdev from %s to %s" %
3023
                  (self.cluster.master_netdev, self.op.master_netdev))
3024
      self.cluster.master_netdev = self.op.master_netdev
3025

    
3026
    self.cfg.Update(self.cluster, feedback_fn)
3027

    
3028
    if self.op.master_netdev:
3029
      feedback_fn("Starting the master ip on the new master netdev (%s)" %
3030
                  self.op.master_netdev)
3031
      result = self.rpc.call_node_start_master(master, False, False)
3032
      if result.fail_msg:
3033
        self.LogWarning("Could not re-enable the master ip on"
3034
                        " the master, please restart manually: %s",
3035
                        result.fail_msg)
3036

    
3037

    
3038
def _UploadHelper(lu, nodes, fname):
3039
  """Helper for uploading a file and showing warnings.
3040

3041
  """
3042
  if os.path.exists(fname):
3043
    result = lu.rpc.call_upload_file(nodes, fname)
3044
    for to_node, to_result in result.items():
3045
      msg = to_result.fail_msg
3046
      if msg:
3047
        msg = ("Copy of file %s to node %s failed: %s" %
3048
               (fname, to_node, msg))
3049
        lu.proc.LogWarning(msg)
3050

    
3051

    
3052
def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
3053
  """Distribute additional files which are part of the cluster configuration.
3054

3055
  ConfigWriter takes care of distributing the config and ssconf files, but
3056
  there are more files which should be distributed to all nodes. This function
3057
  makes sure those are copied.
3058

3059
  @param lu: calling logical unit
3060
  @param additional_nodes: list of nodes not in the config to distribute to
3061
  @type additional_vm: boolean
3062
  @param additional_vm: whether the additional nodes are vm-capable or not
3063

3064
  """
3065
  # 1. Gather target nodes
3066
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
3067
  dist_nodes = lu.cfg.GetOnlineNodeList()
3068
  nvm_nodes = lu.cfg.GetNonVmCapableNodeList()
3069
  vm_nodes = [name for name in dist_nodes if name not in nvm_nodes]
3070
  if additional_nodes is not None:
3071
    dist_nodes.extend(additional_nodes)
3072
    if additional_vm:
3073
      vm_nodes.extend(additional_nodes)
3074
  if myself.name in dist_nodes:
3075
    dist_nodes.remove(myself.name)
3076
  if myself.name in vm_nodes:
3077
    vm_nodes.remove(myself.name)
3078

    
3079
  # 2. Gather files to distribute
3080
  dist_files = set([constants.ETC_HOSTS,
3081
                    constants.SSH_KNOWN_HOSTS_FILE,
3082
                    constants.RAPI_CERT_FILE,
3083
                    constants.RAPI_USERS_FILE,
3084
                    constants.CONFD_HMAC_KEY,
3085
                    constants.CLUSTER_DOMAIN_SECRET_FILE,
3086
                   ])
3087

    
3088
  vm_files = set()
3089
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
3090
  for hv_name in enabled_hypervisors:
3091
    hv_class = hypervisor.GetHypervisor(hv_name)
3092
    vm_files.update(hv_class.GetAncillaryFiles())
3093

    
3094
  # 3. Perform the files upload
3095
  for fname in dist_files:
3096
    _UploadHelper(lu, dist_nodes, fname)
3097
  for fname in vm_files:
3098
    _UploadHelper(lu, vm_nodes, fname)
3099

    
3100

    
3101
class LUClusterRedistConf(NoHooksLU):
3102
  """Force the redistribution of cluster configuration.
3103

3104
  This is a very simple LU.
3105

3106
  """
3107
  REQ_BGL = False
3108

    
3109
  def ExpandNames(self):
3110
    self.needed_locks = {
3111
      locking.LEVEL_NODE: locking.ALL_SET,
3112
    }
3113
    self.share_locks[locking.LEVEL_NODE] = 1
3114

    
3115
  def Exec(self, feedback_fn):
3116
    """Redistribute the configuration.
3117

3118
    """
3119
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
3120
    _RedistributeAncillaryFiles(self)
3121

    
3122

    
3123
def _WaitForSync(lu, instance, disks=None, oneshot=False):
3124
  """Sleep and poll for an instance's disk to sync.
3125

3126
  """
3127
  if not instance.disks or disks is not None and not disks:
3128
    return True
3129

    
3130
  disks = _ExpandCheckDisks(instance, disks)
3131

    
3132
  if not oneshot:
3133
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
3134

    
3135
  node = instance.primary_node
3136

    
3137
  for dev in disks:
3138
    lu.cfg.SetDiskID(dev, node)
3139

    
3140
  # TODO: Convert to utils.Retry
3141

    
3142
  retries = 0
3143
  degr_retries = 10 # in seconds, as we sleep 1 second each time
3144
  while True:
3145
    max_time = 0
3146
    done = True
3147
    cumul_degraded = False
3148
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
3149
    msg = rstats.fail_msg
3150
    if msg:
3151
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
3152
      retries += 1
3153
      if retries >= 10:
3154
        raise errors.RemoteError("Can't contact node %s for mirror data,"
3155
                                 " aborting." % node)
3156
      time.sleep(6)
3157
      continue
3158
    rstats = rstats.payload
3159
    retries = 0
3160
    for i, mstat in enumerate(rstats):
3161
      if mstat is None:
3162
        lu.LogWarning("Can't compute data for node %s/%s",
3163
                           node, disks[i].iv_name)
3164
        continue
3165

    
3166
      cumul_degraded = (cumul_degraded or
3167
                        (mstat.is_degraded and mstat.sync_percent is None))
3168
      if mstat.sync_percent is not None:
3169
        done = False
3170
        if mstat.estimated_time is not None:
3171
          rem_time = ("%s remaining (estimated)" %
3172
                      utils.FormatSeconds(mstat.estimated_time))
3173
          max_time = mstat.estimated_time
3174
        else:
3175
          rem_time = "no time estimate"
3176
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
3177
                        (disks[i].iv_name, mstat.sync_percent, rem_time))
3178

    
3179
    # if we're done but degraded, let's do a few small retries, to
3180
    # make sure we see a stable and not transient situation; therefore
3181
    # we force restart of the loop
3182
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
3183
      logging.info("Degraded disks found, %d retries left", degr_retries)
3184
      degr_retries -= 1
3185
      time.sleep(1)
3186
      continue
3187

    
3188
    if done or oneshot:
3189
      break
3190

    
3191
    time.sleep(min(60, max_time))
3192

    
3193
  if done:
3194
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
3195
  return not cumul_degraded
3196

    
3197

    
3198
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
3199
  """Check that mirrors are not degraded.
3200

3201
  The ldisk parameter, if True, will change the test from the
3202
  is_degraded attribute (which represents overall non-ok status for
3203
  the device(s)) to the ldisk (representing the local storage status).
3204

3205
  """
3206
  lu.cfg.SetDiskID(dev, node)
3207

    
3208
  result = True
3209

    
3210
  if on_primary or dev.AssembleOnSecondary():
3211
    rstats = lu.rpc.call_blockdev_find(node, dev)
3212
    msg = rstats.fail_msg
3213
    if msg:
3214
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
3215
      result = False
3216
    elif not rstats.payload:
3217
      lu.LogWarning("Can't find disk on node %s", node)
3218
      result = False
3219
    else:
3220
      if ldisk:
3221
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
3222
      else:
3223
        result = result and not rstats.payload.is_degraded
3224

    
3225
  if dev.children:
3226
    for child in dev.children:
3227
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
3228

    
3229
  return result
3230

    
3231

    
3232
class LUOobCommand(NoHooksLU):
3233
  """Logical unit for OOB handling.
3234

3235
  """
3236
  REG_BGL = False
3237
  _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
3238

    
3239
  def CheckPrereq(self):
3240
    """Check prerequisites.
3241

3242
    This checks:
3243
     - the node exists in the configuration
3244
     - OOB is supported
3245

3246
    Any errors are signaled by raising errors.OpPrereqError.
3247

3248
    """
3249
    self.nodes = []
3250
    self.master_node = self.cfg.GetMasterNode()
3251

    
3252
    assert self.op.power_delay >= 0.0
3253

    
3254
    if self.op.node_names:
3255
      if self.op.command in self._SKIP_MASTER:
3256
        if self.master_node in self.op.node_names:
3257
          master_node_obj = self.cfg.GetNodeInfo(self.master_node)
3258
          master_oob_handler = _SupportsOob(self.cfg, master_node_obj)
3259

    
3260
          if master_oob_handler:
3261
            additional_text = ("Run '%s %s %s' if you want to operate on the"
3262
                               " master regardless") % (master_oob_handler,
3263
                                                        self.op.command,
3264
                                                        self.master_node)
3265
          else:
3266
            additional_text = "The master node does not support out-of-band"
3267

    
3268
          raise errors.OpPrereqError(("Operating on the master node %s is not"
3269
                                      " allowed for %s\n%s") %
3270
                                     (self.master_node, self.op.command,
3271
                                      additional_text), errors.ECODE_INVAL)
3272
    else:
3273
      self.op.node_names = self.cfg.GetNodeList()
3274
      if self.op.command in self._SKIP_MASTER:
3275
        self.op.node_names.remove(self.master_node)
3276

    
3277
    if self.op.command in self._SKIP_MASTER:
3278
      assert self.master_node not in self.op.node_names
3279

    
3280
    for node_name in self.op.node_names:
3281
      node = self.cfg.GetNodeInfo(node_name)
3282

    
3283
      if node is None:
3284
        raise errors.OpPrereqError("Node %s not found" % node_name,
3285
                                   errors.ECODE_NOENT)
3286
      else:
3287
        self.nodes.append(node)
3288

    
3289
      if (not self.op.ignore_status and
3290
          (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
3291
        raise errors.OpPrereqError(("Cannot power off node %s because it is"
3292
                                    " not marked offline") % node_name,
3293
                                   errors.ECODE_STATE)
3294

    
3295
  def ExpandNames(self):
3296
    """Gather locks we need.
3297

3298
    """
3299
    if self.op.node_names:
3300
      self.op.node_names = [_ExpandNodeName(self.cfg, name)
3301
                            for name in self.op.node_names]
3302
      lock_names = self.op.node_names
3303
    else:
3304
      lock_names = locking.ALL_SET
3305

    
3306
    self.needed_locks = {
3307
      locking.LEVEL_NODE: lock_names,
3308
      }
3309

    
3310
  def Exec(self, feedback_fn):
3311
    """Execute OOB and return result if we expect any.
3312

3313
    """
3314
    master_node = self.master_node
3315
    ret = []
3316

    
3317
    for idx, node in enumerate(self.nodes):
3318
      node_entry = [(constants.RS_NORMAL, node.name)]
3319
      ret.append(node_entry)
3320

    
3321
      oob_program = _SupportsOob(self.cfg, node)
3322

    
3323
      if not oob_program:
3324
        node_entry.append((constants.RS_UNAVAIL, None))
3325
        continue
3326

    
3327
      logging.info("Executing out-of-band command '%s' using '%s' on %s",
3328
                   self.op.command, oob_program, node.name)
3329
      result = self.rpc.call_run_oob(master_node, oob_program,
3330
                                     self.op.command, node.name,
3331
                                     self.op.timeout)
3332

    
3333
      if result.fail_msg:
3334
        self.LogWarning("On node '%s' out-of-band RPC failed with: %s",
3335
                        node.name, result.fail_msg)
3336
        node_entry.append((constants.RS_NODATA, None))
3337
      else:
3338
        try:
3339
          self._CheckPayload(result)
3340
        except errors.OpExecError, err:
3341
          self.LogWarning("The payload returned by '%s' is not valid: %s",
3342
                          node.name, err)
3343
          node_entry.append((constants.RS_NODATA, None))
3344
        else:
3345
          if self.op.command == constants.OOB_HEALTH:
3346
            # For health we should log important events
3347
            for item, status in result.payload:
3348
              if status in [constants.OOB_STATUS_WARNING,
3349
                            constants.OOB_STATUS_CRITICAL]:
3350
                self.LogWarning("On node '%s' item '%s' has status '%s'",
3351
                                node.name, item, status)
3352

    
3353
          if self.op.command == constants.OOB_POWER_ON:
3354
            node.powered = True
3355
          elif self.op.command == constants.OOB_POWER_OFF:
3356
            node.powered = False
3357
          elif self.op.command == constants.OOB_POWER_STATUS:
3358
            powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
3359
            if powered != node.powered:
3360
              logging.warning(("Recorded power state (%s) of node '%s' does not"
3361
                               " match actual power state (%s)"), node.powered,
3362
                              node.name, powered)
3363

    
3364
          # For configuration changing commands we should update the node
3365
          if self.op.command in (constants.OOB_POWER_ON,
3366
                                 constants.OOB_POWER_OFF):
3367
            self.cfg.Update(node, feedback_fn)
3368

    
3369
          node_entry.append((constants.RS_NORMAL, result.payload))
3370

    
3371
          if (self.op.command == constants.OOB_POWER_ON and
3372
              idx < len(self.nodes) - 1):
3373
            time.sleep(self.op.power_delay)
3374

    
3375
    return ret
3376

    
3377
  def _CheckPayload(self, result):
3378
    """Checks if the payload is valid.
3379

3380
    @param result: RPC result
3381
    @raises errors.OpExecError: If payload is not valid
3382

3383
    """
3384
    errs = []
3385
    if self.op.command == constants.OOB_HEALTH:
3386
      if not isinstance(result.payload, list):
3387
        errs.append("command 'health' is expected to return a list but got %s" %
3388
                    type(result.payload))
3389
      else:
3390
        for item, status in result.payload:
3391
          if status not in constants.OOB_STATUSES:
3392
            errs.append("health item '%s' has invalid status '%s'" %
3393
                        (item, status))
3394

    
3395
    if self.op.command == constants.OOB_POWER_STATUS:
3396
      if not isinstance(result.payload, dict):
3397
        errs.append("power-status is expected to return a dict but got %s" %
3398
                    type(result.payload))
3399

    
3400
    if self.op.command in [
3401
        constants.OOB_POWER_ON,
3402
        constants.OOB_POWER_OFF,
3403
        constants.OOB_POWER_CYCLE,
3404
        ]:
3405
      if result.payload is not None:
3406
        errs.append("%s is expected to not return payload but got '%s'" %
3407
                    (self.op.command, result.payload))
3408

    
3409
    if errs:
3410
      raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
3411
                               utils.CommaJoin(errs))
3412

    
3413
class _OsQuery(_QueryBase):
3414
  FIELDS = query.OS_FIELDS
3415

    
3416
  def ExpandNames(self, lu):
3417
    # Lock all nodes in shared mode
3418
    # Temporary removal of locks, should be reverted later
3419
    # TODO: reintroduce locks when they are lighter-weight
3420
    lu.needed_locks = {}
3421
    #self.share_locks[locking.LEVEL_NODE] = 1
3422
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3423

    
3424
    # The following variables interact with _QueryBase._GetNames
3425
    if self.names:
3426
      self.wanted = self.names
3427
    else:
3428
      self.wanted = locking.ALL_SET
3429

    
3430
    self.do_locking = self.use_locking
3431

    
3432
  def DeclareLocks(self, lu, level):
3433
    pass
3434

    
3435
  @staticmethod
3436
  def _DiagnoseByOS(rlist):
3437
    """Remaps a per-node return list into an a per-os per-node dictionary
3438

3439
    @param rlist: a map with node names as keys and OS objects as values
3440

3441
    @rtype: dict
3442
    @return: a dictionary with osnames as keys and as value another
3443
        map, with nodes as keys and tuples of (path, status, diagnose,
3444
        variants, parameters, api_versions) as values, eg::
3445

3446
          {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
3447
                                     (/srv/..., False, "invalid api")],
3448
                           "node2": [(/srv/..., True, "", [], [])]}
3449
          }
3450

3451
    """
3452
    all_os = {}
3453
    # we build here the list of nodes that didn't fail the RPC (at RPC
3454
    # level), so that nodes with a non-responding node daemon don't
3455
    # make all OSes invalid
3456
    good_nodes = [node_name for node_name in rlist
3457
                  if not rlist[node_name].fail_msg]
3458
    for node_name, nr in rlist.items():
3459
      if nr.fail_msg or not nr.payload:
3460
        continue
3461
      for (name, path, status, diagnose, variants,
3462
           params, api_versions) in nr.payload:
3463
        if name not in all_os:
3464
          # build a list of nodes for this os containing empty lists
3465
          # for each node in node_list
3466
          all_os[name] = {}
3467
          for nname in good_nodes:
3468
            all_os[name][nname] = []
3469
        # convert params from [name, help] to (name, help)
3470
        params = [tuple(v) for v in params]
3471
        all_os[name][node_name].append((path, status, diagnose,
3472
                                        variants, params, api_versions))
3473
    return all_os
3474

    
3475
  def _GetQueryData(self, lu):
3476
    """Computes the list of nodes and their attributes.
3477

3478
    """
3479
    # Locking is not used
3480
    assert not (lu.acquired_locks or self.do_locking or self.use_locking)
3481

    
3482
    # Used further down
3483
    assert "valid" in self.FIELDS
3484
    assert "hidden" in self.FIELDS
3485
    assert "blacklisted" in self.FIELDS
3486

    
3487
    valid_nodes = [node.name
3488
                   for node in lu.cfg.GetAllNodesInfo().values()
3489
                   if not node.offline and node.vm_capable]
3490
    pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_nodes))
3491
    cluster = lu.cfg.GetClusterInfo()
3492

    
3493
    # Build list of used field names
3494
    fields = [fdef.name for fdef in self.query.GetFields()]
3495

    
3496
    data = {}
3497

    
3498
    for (os_name, os_data) in pol.items():
3499
      info = query.OsInfo(name=os_name, valid=True, node_status=os_data,
3500
                          hidden=(os_name in cluster.hidden_os),
3501
                          blacklisted=(os_name in cluster.blacklisted_os))
3502

    
3503
      variants = set()
3504
      parameters = set()
3505
      api_versions = set()
3506

    
3507
      for idx, osl in enumerate(os_data.values()):
3508
        info.valid = bool(info.valid and osl and osl[0][1])
3509
        if not info.valid:
3510
          break
3511

    
3512
        (node_variants, node_params, node_api) = osl[0][3:6]
3513
        if idx == 0:
3514
          # First entry
3515
          variants.update(node_variants)
3516
          parameters.update(node_params)
3517
          api_versions.update(node_api)
3518
        else:
3519
          # Filter out inconsistent values
3520
          variants.intersection_update(node_variants)
3521
          parameters.intersection_update(node_params)
3522
          api_versions.intersection_update(node_api)
3523

    
3524
      info.variants = list(variants)
3525
      info.parameters = list(parameters)
3526
      info.api_versions = list(api_versions)
3527

    
3528
      # TODO: Move this to filters provided by the client
3529
      if (("hidden" not in fields and info.hidden) or
3530
          ("blacklisted" not in fields and info.blacklisted) or
3531
          ("valid" not in fields and not info.valid)):
3532
        continue
3533

    
3534
      data[os_name] = info
3535

    
3536
    # Prepare data in requested order
3537
    return [data[name] for name in self._GetNames(lu, pol.keys(), None)
3538
            if name in data]
3539

    
3540

    
3541
class LUOsDiagnose(NoHooksLU):
3542
  """Logical unit for OS diagnose/query.
3543

3544
  """
3545
  REQ_BGL = False
3546

    
3547
  def CheckArguments(self):
3548
    self.oq = _OsQuery(qlang.MakeSimpleFilter("name", self.op.names),
3549
                       self.op.output_fields, False)
3550

    
3551
  def ExpandNames(self):
3552
    self.oq.ExpandNames(self)
3553

    
3554
  def Exec(self, feedback_fn):
3555
    return self.oq.OldStyleQuery(self)
3556

    
3557

    
3558
class LUNodeRemove(LogicalUnit):
3559
  """Logical unit for removing a node.
3560

3561
  """
3562
  HPATH = "node-remove"
3563
  HTYPE = constants.HTYPE_NODE
3564

    
3565
  def BuildHooksEnv(self):
3566
    """Build hooks env.
3567

3568
    This doesn't run on the target node in the pre phase as a failed
3569
    node would then be impossible to remove.
3570

3571
    """
3572
    env = {
3573
      "OP_TARGET": self.op.node_name,
3574
      "NODE_NAME": self.op.node_name,
3575
      }
3576
    all_nodes = self.cfg.GetNodeList()
3577
    try:
3578
      all_nodes.remove(self.op.node_name)
3579
    except ValueError:
3580
      logging.warning("Node %s which is about to be removed not found"
3581
                      " in the all nodes list", self.op.node_name)
3582
    return env, all_nodes, all_nodes
3583

    
3584
  def CheckPrereq(self):
3585
    """Check prerequisites.
3586

3587
    This checks:
3588
     - the node exists in the configuration
3589
     - it does not have primary or secondary instances
3590
     - it's not the master
3591

3592
    Any errors are signaled by raising errors.OpPrereqError.
3593

3594
    """
3595
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3596
    node = self.cfg.GetNodeInfo(self.op.node_name)
3597
    assert node is not None
3598

    
3599
    instance_list = self.cfg.GetInstanceList()
3600

    
3601
    masternode = self.cfg.GetMasterNode()
3602
    if node.name == masternode:
3603
      raise errors.OpPrereqError("Node is the master node,"
3604
                                 " you need to failover first.",
3605
                                 errors.ECODE_INVAL)
3606

    
3607
    for instance_name in instance_list:
3608
      instance = self.cfg.GetInstanceInfo(instance_name)
3609
      if node.name in instance.all_nodes:
3610
        raise errors.OpPrereqError("Instance %s is still running on the node,"
3611
                                   " please remove first." % instance_name,
3612
                                   errors.ECODE_INVAL)
3613
    self.op.node_name = node.name
3614
    self.node = node
3615

    
3616
  def Exec(self, feedback_fn):
3617
    """Removes the node from the cluster.
3618

3619
    """
3620
    node = self.node
3621
    logging.info("Stopping the node daemon and removing configs from node %s",
3622
                 node.name)
3623

    
3624
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
3625

    
3626
    # Promote nodes to master candidate as needed
3627
    _AdjustCandidatePool(self, exceptions=[node.name])
3628
    self.context.RemoveNode(node.name)
3629

    
3630
    # Run post hooks on the node before it's removed
3631
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
3632
    try:
3633
      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
3634
    except:
3635
      # pylint: disable-msg=W0702
3636
      self.LogWarning("Errors occurred running hooks on %s" % node.name)
3637

    
3638
    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
3639
    msg = result.fail_msg
3640
    if msg:
3641
      self.LogWarning("Errors encountered on the remote node while leaving"
3642
                      " the cluster: %s", msg)
3643

    
3644
    # Remove node from our /etc/hosts
3645
    if self.cfg.GetClusterInfo().modify_etc_hosts:
3646
      master_node = self.cfg.GetMasterNode()
3647
      result = self.rpc.call_etc_hosts_modify(master_node,
3648
                                              constants.ETC_HOSTS_REMOVE,
3649
                                              node.name, None)
3650
      result.Raise("Can't update hosts file with new host data")
3651
      _RedistributeAncillaryFiles(self)
3652

    
3653

    
3654
class _NodeQuery(_QueryBase):
3655
  FIELDS = query.NODE_FIELDS
3656

    
3657
  def ExpandNames(self, lu):
3658
    lu.needed_locks = {}
3659
    lu.share_locks[locking.LEVEL_NODE] = 1
3660

    
3661
    if self.names:
3662
      self.wanted = _GetWantedNodes(lu, self.names)
3663
    else:
3664
      self.wanted = locking.ALL_SET
3665

    
3666
    self.do_locking = (self.use_locking and
3667
                       query.NQ_LIVE in self.requested_data)
3668

    
3669
    if self.do_locking:
3670
      # if we don't request only static fields, we need to lock the nodes
3671
      lu.needed_locks[locking.LEVEL_NODE] = self.wanted
3672

    
3673
  def DeclareLocks(self, lu, level):
3674
    pass
3675

    
3676
  def _GetQueryData(self, lu):
3677
    """Computes the list of nodes and their attributes.
3678

3679
    """
3680
    all_info = lu.cfg.GetAllNodesInfo()
3681

    
3682
    nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
3683

    
3684
    # Gather data as requested
3685
    if query.NQ_LIVE in self.requested_data:
3686
      # filter out non-vm_capable nodes
3687
      toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
3688

    
3689
      node_data = lu.rpc.call_node_info(toquery_nodes, lu.cfg.GetVGName(),
3690
                                        lu.cfg.GetHypervisorType())
3691
      live_data = dict((name, nresult.payload)
3692
                       for (name, nresult) in node_data.items()
3693
                       if not nresult.fail_msg and nresult.payload)
3694
    else:
3695
      live_data = None
3696

    
3697
    if query.NQ_INST in self.requested_data:
3698
      node_to_primary = dict([(name, set()) for name in nodenames])
3699
      node_to_secondary = dict([(name, set()) for name in nodenames])
3700

    
3701
      inst_data = lu.cfg.GetAllInstancesInfo()
3702

    
3703
      for inst in inst_data.values():
3704
        if inst.primary_node in node_to_primary:
3705
          node_to_primary[inst.primary_node].add(inst.name)
3706
        for secnode in inst.secondary_nodes:
3707
          if secnode in node_to_secondary:
3708
            node_to_secondary[secnode].add(inst.name)
3709
    else:
3710
      node_to_primary = None
3711
      node_to_secondary = None
3712

    
3713
    if query.NQ_OOB in self.requested_data:
3714
      oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
3715
                         for name, node in all_info.iteritems())
3716
    else:
3717
      oob_support = None
3718

    
3719
    if query.NQ_GROUP in self.requested_data:
3720
      groups = lu.cfg.GetAllNodeGroupsInfo()
3721
    else:
3722
      groups = {}
3723

    
3724
    return query.NodeQueryData([all_info[name] for name in nodenames],
3725
                               live_data, lu.cfg.GetMasterNode(),
3726
                               node_to_primary, node_to_secondary, groups,
3727
                               oob_support, lu.cfg.GetClusterInfo())
3728

    
3729

    
3730
class LUNodeQuery(NoHooksLU):
3731
  """Logical unit for querying nodes.
3732

3733
  """
3734
  # pylint: disable-msg=W0142
3735
  REQ_BGL = False
3736

    
3737
  def CheckArguments(self):
3738
    self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
3739
                         self.op.output_fields, self.op.use_locking)
3740

    
3741
  def ExpandNames(self):
3742
    self.nq.ExpandNames(self)
3743

    
3744
  def Exec(self, feedback_fn):
3745
    return self.nq.OldStyleQuery(self)
3746

    
3747

    
3748
class LUNodeQueryvols(NoHooksLU):
3749
  """Logical unit for getting volumes on node(s).
3750

3751
  """
3752
  REQ_BGL = False
3753
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
3754
  _FIELDS_STATIC = utils.FieldSet("node")
3755

    
3756
  def CheckArguments(self):
3757
    _CheckOutputFields(static=self._FIELDS_STATIC,
3758
                       dynamic=self._FIELDS_DYNAMIC,
3759
                       selected=self.op.output_fields)
3760

    
3761
  def ExpandNames(self):
3762
    self.needed_locks = {}
3763
    self.share_locks[locking.LEVEL_NODE] = 1
3764
    if not self.op.nodes:
3765
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3766
    else:
3767
      self.needed_locks[locking.LEVEL_NODE] = \
3768
        _GetWantedNodes(self, self.op.nodes)
3769

    
3770
  def Exec(self, feedback_fn):
3771
    """Computes the list of nodes and their attributes.
3772

3773
    """
3774
    nodenames = self.acquired_locks[locking.LEVEL_NODE]
3775
    volumes = self.rpc.call_node_volumes(nodenames)
3776

    
3777
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
3778
             in self.cfg.GetInstanceList()]
3779

    
3780
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
3781

    
3782
    output = []
3783
    for node in nodenames:
3784
      nresult = volumes[node]
3785
      if nresult.offline:
3786
        continue
3787
      msg = nresult.fail_msg
3788
      if msg:
3789
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
3790
        continue
3791

    
3792
      node_vols = nresult.payload[:]
3793
      node_vols.sort(key=lambda vol: vol['dev'])
3794

    
3795
      for vol in node_vols:
3796
        node_output = []
3797
        for field in self.op.output_fields:
3798
          if field == "node":
3799
            val = node
3800
          elif field == "phys":
3801
            val = vol['dev']
3802
          elif field == "vg":
3803
            val = vol['vg']
3804
          elif field == "name":
3805
            val = vol['name']
3806
          elif field == "size":
3807
            val = int(float(vol['size']))
3808
          elif field == "instance":
3809
            for inst in ilist:
3810
              if node not in lv_by_node[inst]:
3811
                continue
3812
              if vol['name'] in lv_by_node[inst][node]:
3813
                val = inst.name
3814
                break
3815
            else:
3816
              val = '-'
3817
          else:
3818
            raise errors.ParameterError(field)
3819
          node_output.append(str(val))
3820

    
3821
        output.append(node_output)
3822

    
3823
    return output
3824

    
3825

    
3826
class LUNodeQueryStorage(NoHooksLU):
3827
  """Logical unit for getting information on storage units on node(s).
3828

3829
  """
3830
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
3831
  REQ_BGL = False
3832

    
3833
  def CheckArguments(self):
3834
    _CheckOutputFields(static=self._FIELDS_STATIC,
3835
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
3836
                       selected=self.op.output_fields)
3837

    
3838
  def ExpandNames(self):
3839
    self.needed_locks = {}
3840
    self.share_locks[locking.LEVEL_NODE] = 1
3841

    
3842
    if self.op.nodes:
3843
      self.needed_locks[locking.LEVEL_NODE] = \
3844
        _GetWantedNodes(self, self.op.nodes)
3845
    else:
3846
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3847

    
3848
  def Exec(self, feedback_fn):
3849
    """Computes the list of nodes and their attributes.
3850

3851
    """
3852
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
3853

    
3854
    # Always get name to sort by
3855
    if constants.SF_NAME in self.op.output_fields:
3856
      fields = self.op.output_fields[:]
3857
    else:
3858
      fields = [constants.SF_NAME] + self.op.output_fields
3859

    
3860
    # Never ask for node or type as it's only known to the LU
3861
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
3862
      while extra in fields:
3863
        fields.remove(extra)
3864

    
3865
    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
3866
    name_idx = field_idx[constants.SF_NAME]
3867

    
3868
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3869
    data = self.rpc.call_storage_list(self.nodes,
3870
                                      self.op.storage_type, st_args,
3871
                                      self.op.name, fields)
3872

    
3873
    result = []
3874

    
3875
    for node in utils.NiceSort(self.nodes):
3876
      nresult = data[node]
3877
      if nresult.offline:
3878
        continue
3879

    
3880
      msg = nresult.fail_msg
3881
      if msg:
3882
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
3883
        continue
3884

    
3885
      rows = dict([(row[name_idx], row) for row in nresult.payload])
3886

    
3887
      for name in utils.NiceSort(rows.keys()):
3888
        row = rows[name]
3889

    
3890
        out = []
3891

    
3892
        for field in self.op.output_fields:
3893
          if field == constants.SF_NODE:
3894
            val = node
3895
          elif field == constants.SF_TYPE:
3896
            val = self.op.storage_type
3897
          elif field in field_idx:
3898
            val = row[field_idx[field]]
3899
          else:
3900
            raise errors.ParameterError(field)
3901

    
3902
          out.append(val)
3903

    
3904
        result.append(out)
3905

    
3906
    return result
3907

    
3908

    
3909
class _InstanceQuery(_QueryBase):
3910
  FIELDS = query.INSTANCE_FIELDS
3911

    
3912
  def ExpandNames(self, lu):
3913
    lu.needed_locks = {}
3914
    lu.share_locks[locking.LEVEL_INSTANCE] = 1
3915
    lu.share_locks[locking.LEVEL_NODE] = 1
3916

    
3917
    if self.names:
3918
      self.wanted = _GetWantedInstances(lu, self.names)
3919
    else:
3920
      self.wanted = locking.ALL_SET
3921

    
3922
    self.do_locking = (self.use_locking and
3923
                       query.IQ_LIVE in self.requested_data)
3924
    if self.do_locking:
3925
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
3926
      lu.needed_locks[locking.LEVEL_NODE] = []
3927
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3928

    
3929
  def DeclareLocks(self, lu, level):
3930
    if level == locking.LEVEL_NODE and self.do_locking:
3931
      lu._LockInstancesNodes() # pylint: disable-msg=W0212
3932

    
3933
  def _GetQueryData(self, lu):
3934
    """Computes the list of instances and their attributes.
3935

3936
    """
3937
    cluster = lu.cfg.GetClusterInfo()
3938
    all_info = lu.cfg.GetAllInstancesInfo()
3939

    
3940
    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
3941

    
3942
    instance_list = [all_info[name] for name in instance_names]
3943
    nodes = frozenset(itertools.chain(*(inst.all_nodes
3944
                                        for inst in instance_list)))
3945
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
3946
    bad_nodes = []
3947
    offline_nodes = []
3948
    wrongnode_inst = set()
3949

    
3950
    # Gather data as requested
3951
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
3952
      live_data = {}
3953
      node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
3954
      for name in nodes:
3955
        result = node_data[name]
3956
        if result.offline:
3957
          # offline nodes will be in both lists
3958
          assert result.fail_msg
3959
          offline_nodes.append(name)
3960
        if result.fail_msg:
3961
          bad_nodes.append(name)
3962
        elif result.payload:
3963
          for inst in result.payload:
3964
            if all_info[inst].primary_node == name:
3965
              live_data.update(result.payload)
3966
            else:
3967
              wrongnode_inst.add(inst)
3968
        # else no instance is alive
3969
    else:
3970
      live_data = {}
3971

    
3972
    if query.IQ_DISKUSAGE in self.requested_data:
3973
      disk_usage = dict((inst.name,
3974
                         _ComputeDiskSize(inst.disk_template,
3975
                                          [{"size": disk.size}
3976
                                           for disk in inst.disks]))
3977
                        for inst in instance_list)
3978
    else:
3979
      disk_usage = None
3980

    
3981
    if query.IQ_CONSOLE in self.requested_data:
3982
      consinfo = {}
3983
      for inst in instance_list:
3984
        if inst.name in live_data:
3985
          # Instance is running
3986
          consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
3987
        else:
3988
          consinfo[inst.name] = None
3989
      assert set(consinfo.keys()) == set(instance_names)
3990
    else:
3991
      consinfo = None
3992

    
3993
    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
3994
                                   disk_usage, offline_nodes, bad_nodes,
3995
                                   live_data, wrongnode_inst, consinfo)
3996

    
3997

    
3998
class LUQuery(NoHooksLU):
3999
  """Query for resources/items of a certain kind.
4000

4001
  """
4002
  # pylint: disable-msg=W0142
4003
  REQ_BGL = False
4004

    
4005
  def CheckArguments(self):
4006
    qcls = _GetQueryImplementation(self.op.what)
4007

    
4008
    self.impl = qcls(self.op.filter, self.op.fields, False)
4009

    
4010
  def ExpandNames(self):
4011
    self.impl.ExpandNames(self)
4012

    
4013
  def DeclareLocks(self, level):
4014
    self.impl.DeclareLocks(self, level)
4015

    
4016
  def Exec(self, feedback_fn):
4017
    return self.impl.NewStyleQuery(self)
4018

    
4019

    
4020
class LUQueryFields(NoHooksLU):
4021
  """Query for resources/items of a certain kind.
4022

4023
  """
4024
  # pylint: disable-msg=W0142
4025
  REQ_BGL = False
4026

    
4027
  def CheckArguments(self):
4028
    self.qcls = _GetQueryImplementation(self.op.what)
4029

    
4030
  def ExpandNames(self):
4031
    self.needed_locks = {}
4032

    
4033
  def Exec(self, feedback_fn):
4034
    return self.qcls.FieldsQuery(self.op.fields)
4035

    
4036

    
4037
class LUNodeModifyStorage(NoHooksLU):
4038
  """Logical unit for modifying a storage volume on a node.
4039

4040
  """
4041
  REQ_BGL = False
4042

    
4043
  def CheckArguments(self):
4044
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4045

    
4046
    storage_type = self.op.storage_type
4047

    
4048
    try:
4049
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
4050
    except KeyError:
4051
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
4052
                                 " modified" % storage_type,
4053
                                 errors.ECODE_INVAL)
4054

    
4055
    diff = set(self.op.changes.keys()) - modifiable
4056
    if diff:
4057
      raise errors.OpPrereqError("The following fields can not be modified for"
4058
                                 " storage units of type '%s': %r" %
4059
                                 (storage_type, list(diff)),
4060
                                 errors.ECODE_INVAL)
4061

    
4062
  def ExpandNames(self):
4063
    self.needed_locks = {
4064
      locking.LEVEL_NODE: self.op.node_name,
4065
      }
4066

    
4067
  def Exec(self, feedback_fn):
4068
    """Computes the list of nodes and their attributes.
4069

4070
    """
4071
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
4072
    result = self.rpc.call_storage_modify(self.op.node_name,
4073
                                          self.op.storage_type, st_args,
4074
                                          self.op.name, self.op.changes)
4075
    result.Raise("Failed to modify storage unit '%s' on %s" %
4076
                 (self.op.name, self.op.node_name))
4077

    
4078

    
4079
class LUNodeAdd(LogicalUnit):
4080
  """Logical unit for adding node to the cluster.
4081

4082
  """
4083
  HPATH = "node-add"
4084
  HTYPE = constants.HTYPE_NODE
4085
  _NFLAGS = ["master_capable", "vm_capable"]
4086

    
4087
  def CheckArguments(self):
4088
    self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
4089
    # validate/normalize the node name
4090
    self.hostname = netutils.GetHostname(name=self.op.node_name,
4091
                                         family=self.primary_ip_family)
4092
    self.op.node_name = self.hostname.name
4093
    if self.op.readd and self.op.group:
4094
      raise errors.OpPrereqError("Cannot pass a node group when a node is"
4095
                                 " being readded", errors.ECODE_INVAL)
4096

    
4097
  def BuildHooksEnv(self):
4098
    """Build hooks env.
4099

4100
    This will run on all nodes before, and on all nodes + the new node after.
4101

4102
    """
4103
    env = {
4104
      "OP_TARGET": self.op.node_name,
4105
      "NODE_NAME": self.op.node_name,
4106
      "NODE_PIP": self.op.primary_ip,
4107
      "NODE_SIP": self.op.secondary_ip,
4108
      "MASTER_CAPABLE": str(self.op.master_capable),
4109
      "VM_CAPABLE": str(self.op.vm_capable),
4110
      }
4111
    nodes_0 = self.cfg.GetNodeList()
4112
    nodes_1 = nodes_0 + [self.op.node_name, ]
4113
    return env, nodes_0, nodes_1
4114

    
4115
  def CheckPrereq(self):
4116
    """Check prerequisites.
4117

4118
    This checks:
4119
     - the new node is not already in the config
4120
     - it is resolvable
4121
     - its parameters (single/dual homed) matches the cluster
4122

4123
    Any errors are signaled by raising errors.OpPrereqError.
4124

4125
    """
4126
    cfg = self.cfg
4127
    hostname = self.hostname
4128
    node = hostname.name
4129
    primary_ip = self.op.primary_ip = hostname.ip
4130
    if self.op.secondary_ip is None:
4131
      if self.primary_ip_family == netutils.IP6Address.family:
4132
        raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
4133
                                   " IPv4 address must be given as secondary",
4134
                                   errors.ECODE_INVAL)
4135
      self.op.secondary_ip = primary_ip
4136

    
4137
    secondary_ip = self.op.secondary_ip
4138
    if not netutils.IP4Address.IsValid(secondary_ip):
4139
      raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
4140
                                 " address" % secondary_ip, errors.ECODE_INVAL)
4141

    
4142
    node_list = cfg.GetNodeList()
4143
    if not self.op.readd and node in node_list:
4144
      raise errors.OpPrereqError("Node %s is already in the configuration" %
4145
                                 node, errors.ECODE_EXISTS)
4146
    elif self.op.readd and node not in node_list:
4147
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
4148
                                 errors.ECODE_NOENT)
4149

    
4150
    self.changed_primary_ip = False
4151

    
4152
    for existing_node_name in node_list:
4153
      existing_node = cfg.GetNodeInfo(existing_node_name)
4154

    
4155
      if self.op.readd and node == existing_node_name:
4156
        if existing_node.secondary_ip != secondary_ip:
4157
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
4158
                                     " address configuration as before",
4159
                                     errors.ECODE_INVAL)
4160
        if existing_node.primary_ip != primary_ip:
4161
          self.changed_primary_ip = True
4162

    
4163
        continue
4164

    
4165
      if (existing_node.primary_ip == primary_ip or
4166
          existing_node.secondary_ip == primary_ip or
4167
          existing_node.primary_ip == secondary_ip or
4168
          existing_node.secondary_ip == secondary_ip):
4169
        raise errors.OpPrereqError("New node ip address(es) conflict with"
4170
                                   " existing node %s" % existing_node.name,
4171
                                   errors.ECODE_NOTUNIQUE)

    # After this 'if' block, None is no longer a valid value for the
    # _capable op attributes
    if self.op.readd:
      old_node = self.cfg.GetNodeInfo(node)
      assert old_node is not None, "Can't retrieve locked node %s" % node
      for attr in self._NFLAGS:
        if getattr(self.op, attr) is None:
          setattr(self.op, attr, getattr(old_node, attr))
    else:
      for attr in self._NFLAGS:
        if getattr(self.op, attr) is None:
          setattr(self.op, attr, True)

    if self.op.readd and not self.op.vm_capable:
      pri, sec = cfg.GetNodeInstances(node)
      if pri or sec:
        raise errors.OpPrereqError("Node %s being re-added with vm_capable"
                                   " flag set to false, but it already holds"
                                   " instances" % node,
                                   errors.ECODE_STATE)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no secondary ip but the"
                                   " new node has one",
                                   errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("The master has a secondary ip but the"
                                   " new node doesn't have one",
                                   errors.ECODE_INVAL)

    # checks reachability
    if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping",
                                 errors.ECODE_ENVIRON)

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to node daemon port",
                                   errors.ECODE_ENVIRON)

    if self.op.readd:
      exceptions = [node]
    else:
      exceptions = []

    if self.op.master_capable:
      self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
    else:
      self.master_candidate = False

    if self.op.readd:
      self.new_node = old_node
    else:
      node_group = cfg.LookupNodeGroup(self.op.group)
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False,
                                   group=node_group)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # We are adding a new node, so we assume it's powered
    new_node.powered = True

    # for re-adds, reset the offline/drained/master-candidate flags;
    # we need to reset here, otherwise offline would prevent RPC calls
    # later in the procedure; this also means that if the re-add
    # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
      self.LogInfo("Readding a node, the offline/drained flags were reset")
      # if we demote the node, we do cleanup later in the procedure
      new_node.master_candidate = self.master_candidate
      if self.changed_primary_ip:
        new_node.primary_ip = self.op.primary_ip

    # copy the master/vm_capable flags
    for attr in self._NFLAGS:
      setattr(new_node, attr, getattr(self.op, attr))

    # notify the user about any possible mc promotion
    if new_node.master_candidate:
      self.LogInfo("Node will be a master candidate")

    if self.op.ndparams:
      new_node.ndparams = self.op.ndparams
    else:
      new_node.ndparams = {}

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise("Can't get version information from node %s" % node)
    if constants.PROTOCOL_VERSION == result.payload:
      logging.info("Communication to node %s fine, sw version %s match",
                   node, result.payload)
    else:
      raise errors.OpExecError("Version mismatch master version %s,"
                               " node version %s" %
                               (constants.PROTOCOL_VERSION, result.payload))

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      master_node = self.cfg.GetMasterNode()
      result = self.rpc.call_etc_hosts_modify(master_node,
                                              constants.ETC_HOSTS_ADD,
                                              self.hostname.name,
                                              self.hostname.ip)
      result.Raise("Can't update hosts file with new host data")

    if new_node.secondary_ip != new_node.primary_ip:
      _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
                               False)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      constants.NV_NODELIST: [node],
      # TODO: do a node-net-test as well?
      }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
      if nl_payload:
        for failed in nl_payload:
          feedback_fn("ssh/hostname verification failed"
                      " (checking from %s): %s" %
                      (verifier, nl_payload[failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    if self.op.readd:
      _RedistributeAncillaryFiles(self)
      self.context.ReaddNode(new_node)
      # make sure we redistribute the config
      self.cfg.Update(new_node, feedback_fn)
      # and make sure the new node will not have old files around
      if not new_node.master_candidate:
        result = self.rpc.call_node_demote_from_mc(new_node.name)
        msg = result.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself from master"
                          " candidate status: %s" % msg)
    else:
      _RedistributeAncillaryFiles(self, additional_nodes=[node],
                                  additional_vm=self.op.vm_capable)
      self.context.AddNode(new_node, self.proc.GetECId())


class LUNodeSetParams(LogicalUnit):
  """Modifies the parameters of a node.

  @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
      to the node role (as _ROLE_*)
  @cvar _R2F: a dictionary from node role to tuples of flags
  @cvar _FLAGS: a list of attribute names corresponding to the flags

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  REQ_BGL = False
  (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
  _F2R = {
    (True, False, False): _ROLE_CANDIDATE,
    (False, True, False): _ROLE_DRAINED,
    (False, False, True): _ROLE_OFFLINE,
    (False, False, False): _ROLE_REGULAR,
    }
  _R2F = dict((v, k) for k, v in _F2R.items())
  _FLAGS = ["master_candidate", "drained", "offline"]
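  # For example, _F2R[(True, False, False)] is _ROLE_CANDIDATE and the
  # inverse mapping gives back the flag tuple, i.e.
  # _R2F[_ROLE_OFFLINE] == (False, False, True); the tuple elements
  # correspond, in order, to the attributes listed in _FLAGS.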

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
                self.op.master_capable, self.op.vm_capable,
                self.op.secondary_ip, self.op.ndparams]
    if all_mods.count(None) == len(all_mods):
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)
    if all_mods.count(True) > 1:
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time",
                                 errors.ECODE_INVAL)

    # Boolean value that tells us whether we might be demoting from MC
    self.might_demote = (self.op.master_candidate == False or
                         self.op.offline == True or
                         self.op.drained == True or
                         self.op.master_capable == False)

    if self.op.secondary_ip:
      if not netutils.IP4Address.IsValid(self.op.secondary_ip):
        raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
                                   " address" % self.op.secondary_ip,
                                   errors.ECODE_INVAL)

    self.lock_all = self.op.auto_promote and self.might_demote
    self.lock_instances = self.op.secondary_ip is not None

  def ExpandNames(self):
    if self.lock_all:
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    else:
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

    if self.lock_instances:
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

  def DeclareLocks(self, level):
    # If we have locked all instances, before waiting to lock nodes, release
    # all the ones living on nodes unrelated to the current operation.
    if level == locking.LEVEL_NODE and self.lock_instances:
      instances_release = []
      instances_keep = []
      self.affected_instances = []
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
        for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
          instance = self.context.cfg.GetInstanceInfo(instance_name)
          i_mirrored = instance.disk_template in constants.DTS_INT_MIRROR
          if i_mirrored and self.op.node_name in instance.all_nodes:
            instances_keep.append(instance_name)
            self.affected_instances.append(instance)
          else:
            instances_release.append(instance_name)
        if instances_release:
          self.context.glm.release(locking.LEVEL_INSTANCE, instances_release)
          self.acquired_locks[locking.LEVEL_INSTANCE] = instances_keep

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      "MASTER_CAPABLE": str(self.op.master_capable),
      "VM_CAPABLE": str(self.op.vm_capable),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if (self.op.master_candidate is not None or
        self.op.drained is not None or
        self.op.offline is not None):
      # we can't change the master's node flags
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master role can be changed"
                                   " only via master-failover",
                                   errors.ECODE_INVAL)

    if self.op.master_candidate and not node.master_capable:
      raise errors.OpPrereqError("Node %s is not master capable, cannot make"
                                 " it a master candidate" % node.name,
                                 errors.ECODE_STATE)

    if self.op.vm_capable == False:
      (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
      if ipri or isec:
        raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
                                   " the vm_capable flag" % node.name,
                                   errors.ECODE_STATE)

    if node.master_candidate and self.might_demote and not self.lock_all:
      assert not self.op.auto_promote, "auto_promote set but lock_all not"
      # check if after removing the current node, we're missing master
      # candidates
      (mc_remaining, mc_should, _) = \
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
      if mc_remaining < mc_should:
        raise errors.OpPrereqError("Not enough master candidates, please"
                                   " pass auto promote option to allow"
                                   " promotion", errors.ECODE_STATE)

    self.old_flags = old_flags = (node.master_candidate,
                                  node.drained, node.offline)
    assert old_flags in self._F2R, "Un-handled old flags  %s" % str(old_flags)
    self.old_role = old_role = self._F2R[old_flags]

    # Check for ineffective changes
    for attr in self._FLAGS:
      if (getattr(self.op, attr) == False and getattr(node, attr) == False):
        self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
        setattr(self.op, attr, None)

    # Past this point, any flag change to False means a transition
    # away from the respective state, as only real changes are kept

    # TODO: We might query the real power state if it supports OOB
    if _SupportsOob(self.cfg, node):
      if self.op.offline is False and not (node.powered or
                                           self.op.powered == True):
        raise errors.OpPrereqError(("Please power on node %s first before you"
                                    " can reset offline state") %
                                   self.op.node_name)
    elif self.op.powered is not None:
      raise errors.OpPrereqError(("Unable to change powered state for node %s"
                                  " which does not support out-of-band"
                                  " handling") % self.op.node_name)

    # If we're being deofflined/drained, we'll MC ourself if needed
    if (self.op.drained == False or self.op.offline == False or
        (self.op.master_capable and not node.master_capable)):
      if _DecideSelfPromotion(self):
        self.op.master_candidate = True
        self.LogInfo("Auto-promoting node to master candidate")

    # If we're no longer master capable, we'll demote ourselves from MC
    if self.op.master_capable == False and node.master_candidate:
      self.LogInfo("Demoting from master candidate")
      self.op.master_candidate = False

    # Compute new role
    assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
    if self.op.master_candidate:
      new_role = self._ROLE_CANDIDATE
    elif self.op.drained:
      new_role = self._ROLE_DRAINED
    elif self.op.offline:
      new_role = self._ROLE_OFFLINE
    elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
      # False is still in new flags, which means we're un-setting (the
      # only) True flag
      new_role = self._ROLE_REGULAR
    else: # no new flags, nothing, keep old role
      new_role = old_role
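    # (Illustrative example: requesting offline=True on a currently regular
    # node selects _ROLE_OFFLINE above, while passing offline=False on an
    # offline node falls through to _ROLE_REGULAR.)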

    self.new_role = new_role

    if old_role == self._ROLE_OFFLINE and new_role != old_role:
      # Trying to transition out of offline status
      result = self.rpc.call_version([node.name])[node.name]
      if result.fail_msg:
        raise errors.OpPrereqError("Node %s is being de-offlined but fails"
                                   " to report its version: %s" %
                                   (node.name, result.fail_msg),
                                   errors.ECODE_STATE)
      else:
        self.LogWarning("Transitioning node from offline to online state"
                        " without using re-add. Please make sure the node"
                        " is healthy!")

    if self.op.secondary_ip:
      # Ok even without locking, because this can't be changed by any LU
      master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
      master_singlehomed = master.secondary_ip == master.primary_ip
      if master_singlehomed and self.op.secondary_ip:
        raise errors.OpPrereqError("Cannot change the secondary ip on a single"
                                   " homed cluster", errors.ECODE_INVAL)

      if node.offline:
        if self.affected_instances:
          raise errors.OpPrereqError("Cannot change secondary ip: offline"
                                     " node has instances (%s) configured"
                                     " to use it" % self.affected_instances)
      else:
        # On online nodes, check that no instances are running, and that
        # the node has the new ip and we can reach it.
        for instance in self.affected_instances:
          _CheckInstanceDown(self, instance, "cannot change secondary ip")

        _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
        if master.name != node.name:
          # check reachability from master secondary ip to new secondary ip
          if not netutils.TcpPing(self.op.secondary_ip,
                                  constants.DEFAULT_NODED_PORT,
                                  source=master.secondary_ip):
            raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                       " based ping to node daemon port",
                                       errors.ECODE_ENVIRON)

    if self.op.ndparams:
      new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = new_ndparams

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node
    old_role = self.old_role
    new_role = self.new_role

    result = []

    if self.op.ndparams:
      node.ndparams = self.new_ndparams

    if self.op.powered is not None:
      node.powered = self.op.powered

    for attr in ["master_capable", "vm_capable"]:
      val = getattr(self.op, attr)
      if val is not None:
        setattr(node, attr, val)
        result.append((attr, str(val)))

    if new_role != old_role:
      # Tell the node to demote itself, if no longer MC and not offline
      if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
        msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself: %s", msg)

      new_flags = self._R2F[new_role]
      for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
        if of != nf:
          result.append((desc, str(nf)))
      (node.master_candidate, node.drained, node.offline) = new_flags

      # we locked all nodes, we adjust the CP before updating this node
      if self.lock_all:
        _AdjustCandidatePool(self, [node.name])

    if self.op.secondary_ip:
      node.secondary_ip = self.op.secondary_ip
      result.append(("secondary_ip", self.op.secondary_ip))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node, feedback_fn)

    # this will trigger job queue propagation or cleanup if the mc
    # flag changed
    if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
      self.context.ReaddNode(node)

    return result


class LUNodePowercycle(NoHooksLU):
  """Powercycles a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
      raise errors.OpPrereqError("The node is the master and the force"
                                 " parameter was not set",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    """Locking for PowercycleNode.

    This is a last-resort option and shouldn't block on other
    jobs. Therefore, we grab no locks.

    """
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Reboots a node.

    """
    result = self.rpc.call_node_powercycle(self.op.node_name,
                                           self.cfg.GetHypervisorType())
    result.Raise("Failed to schedule the reboot")
    return result.payload


class LUClusterQuery(NoHooksLU):
  """Query cluster configuration.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    os_hvp = {}

    # Filter just for enabled hypervisors
    for os_name, hv_dict in cluster.os_hvp.items():
      os_hvp[os_name] = {}
      for hv_name, hv_params in hv_dict.items():
        if hv_name in cluster.enabled_hypervisors:
          os_hvp[os_name][hv_name] = hv_params
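    # (Illustrative example: the resulting structure maps an OS name to a
    # dict of per-hypervisor parameter overrides, e.g.
    # {"debootstrap": {"xen-pvm": {...}}}, keeping only hypervisors that
    # are currently enabled on the cluster; the OS name here is made up.)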

    # Convert ip_family to ip_version
    primary_ip_version = constants.IP4_VERSION
    if cluster.primary_ip_family == netutils.IP6Address.family:
      primary_ip_version = constants.IP6_VERSION

    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": max(constants.OS_API_VERSIONS),
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.enabled_hypervisors[0],
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
                        for hypervisor_name in cluster.enabled_hypervisors]),
      "os_hvp": os_hvp,
      "beparams": cluster.beparams,
      "osparams": cluster.osparams,
      "nicparams": cluster.nicparams,
      "ndparams": cluster.ndparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "master_netdev": cluster.master_netdev,
      "volume_group_name": cluster.volume_group_name,
      "drbd_usermode_helper": cluster.drbd_usermode_helper,
      "file_storage_dir": cluster.file_storage_dir,
      "shared_file_storage_dir": cluster.shared_file_storage_dir,
      "maintain_node_health": cluster.maintain_node_health,
      "ctime": cluster.ctime,
      "mtime": cluster.mtime,
      "uuid": cluster.uuid,
      "tags": list(cluster.GetTags()),
      "uid_pool": cluster.uid_pool,
      "default_iallocator": cluster.default_iallocator,
      "reserved_lvs": cluster.reserved_lvs,
      "primary_ip_version": primary_ip_version,
      "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
      "hidden_os": cluster.hidden_os,
      "blacklisted_os": cluster.blacklisted_os,
      }

    return result


class LUClusterConfigQuery(NoHooksLU):
  """Return configuration values.

  """
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
                                  "watcher_pause", "volume_group_name")

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      elif field == "watcher_pause":
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
      elif field == "volume_group_name":
        entry = self.cfg.GetVGName()
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values
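    # (Illustrative example: for op.output_fields == ["cluster_name",
    # "master_node"] the returned list holds the corresponding values, in
    # the same order as the requested fields.)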


class LUInstanceActivateDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = \
              _AssembleInstanceDisks(self, self.instance,
                                     ignore_size=self.op.ignore_size)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


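# Illustrative note (the values below are made up): the helper that follows
# returns a (disks_ok, device_info) pair, where device_info holds one
# (node, iv_name, device_path) tuple per disk, e.g.
# [("node1.example.com", "disk/0", "/dev/drbd0")]; LUInstanceActivateDisks
# above simply returns that list as the job result.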
def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
4814
                           ignore_size=False):
4815
  """Prepare the block devices for an instance.
4816

4817
  This sets up the block devices on all nodes.
4818

4819
  @type lu: L{LogicalUnit}
4820
  @param lu: the logical unit on whose behalf we execute
4821
  @type instance: L{objects.Instance}
4822
  @param instance: the instance for whose disks we assemble
4823
  @type disks: list of L{objects.Disk} or None
4824
  @param disks: which disks to assemble (or all, if None)
4825
  @type ignore_secondaries: boolean
4826
  @param ignore_secondaries: if true, errors on secondary nodes
4827
      won't result in an error return from the function
4828
  @type ignore_size: boolean
4829
  @param ignore_size: if true, the current known size of the disk
4830
      will not be used during the disk activation, useful for cases
4831
      when the size is wrong
4832
  @return: False if the operation failed, otherwise a list of
4833
      (host, instance_visible_name, node_visible_name)
4834
      with the mapping from node devices to instance devices
4835

4836
  """
4837
  device_info = []
4838
  disks_ok = True
4839
  iname = instance.name
4840
  disks = _ExpandCheckDisks(instance, disks)
4841

    
4842
  # With the two passes mechanism we try to reduce the window of
4843
  # opportunity for the race condition of switching DRBD to primary
4844
  # before handshaking occurred, but we do not eliminate it
4845

    
4846
  # The proper fix would be to wait (with some limits) until the
4847
  # connection has been made and drbd transitions from WFConnection
4848
  # into any other network-connected state (Connected, SyncTarget,
4849
  # SyncSource, etc.)
4850

    
4851
  # 1st pass, assemble on all nodes in secondary mode
4852
  for idx, inst_disk in enumerate(disks):
4853
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
4854
      if ignore_size:
4855
        node_disk = node_disk.Copy()
4856
        node_disk.UnsetSize()
4857
      lu.cfg.SetDiskID(node_disk, node)
4858
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False, idx)
4859
      msg = result.fail_msg
4860
      if msg:
4861
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
4862
                           " (is_primary=False, pass=1): %s",
4863
                           inst_disk.iv_name, node, msg)
4864
        if not ignore_secondaries:
4865
          disks_ok = False
4866

    
4867
  # FIXME: race condition on drbd migration to primary
4868

    
4869
  # 2nd pass, do only the primary node
4870
  for idx, inst_disk in enumerate(disks):
4871
    dev_path = None
4872

    
4873
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
4874
      if node != instance.primary_node:
4875
        continue
4876
      if ignore_size:
4877
        node_disk = node_disk.Copy()
4878
        node_disk.UnsetSize()
4879
      lu.cfg.SetDiskID(node_disk, node)
4880
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True, idx)
4881
      msg = result.fail_msg
4882
      if msg:
4883
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
4884
                           " (is_primary=True, pass=2): %s",
4885
                           inst_disk.iv_name, node, msg)
4886
        disks_ok = False
4887
      else:
4888
        dev_path = result.payload
4889

    
4890
    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
4891

    
4892
  # leave the disks configured for the primary node
4893
  # this is a workaround that would be fixed better by
4894
  # improving the logical/physical id handling
4895
  for disk in disks:
4896
    lu.cfg.SetDiskID(disk, instance.primary_node)
4897

    
4898
  return disks_ok, device_info
4899

    
4900

    
4901
def _StartInstanceDisks(lu, instance, force):
4902
  """Start the disks of an instance.
4903

4904
  """
4905
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
4906
                                           ignore_secondaries=force)
4907
  if not disks_ok:
4908
    _ShutdownInstanceDisks(lu, instance)
4909
    if force is not None and not force:
4910
      lu.proc.LogWarning("", hint="If the message above refers to a"
4911
                         " secondary node,"
4912
                         " you can retry the operation using '--force'.")
4913
    raise errors.OpExecError("Disk consistency error")
4914

    
4915

    
4916
class LUInstanceDeactivateDisks(NoHooksLU):
4917
  """Shutdown an instance's disks.
4918

4919
  """
4920
  REQ_BGL = False
4921

    
4922
  def ExpandNames(self):
4923
    self._ExpandAndLockInstance()
4924
    self.needed_locks[locking.LEVEL_NODE] = []
4925
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4926

    
4927
  def DeclareLocks(self, level):
4928
    if level == locking.LEVEL_NODE:
4929
      self._LockInstancesNodes()
4930

    
4931
  def CheckPrereq(self):
4932
    """Check prerequisites.
4933

4934
    This checks that the instance is in the cluster.
4935

4936
    """
4937
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4938
    assert self.instance is not None, \
4939
      "Cannot retrieve locked instance %s" % self.op.instance_name
4940

    
4941
  def Exec(self, feedback_fn):
4942
    """Deactivate the disks
4943

4944
    """
4945
    instance = self.instance
4946
    if self.op.force:
4947
      _ShutdownInstanceDisks(self, instance)
4948
    else:
4949
      _SafeShutdownInstanceDisks(self, instance)
4950

    
4951

    
4952
def _SafeShutdownInstanceDisks(lu, instance, disks=None):
4953
  """Shutdown block devices of an instance.
4954

4955
  This function checks if an instance is running, before calling
4956
  _ShutdownInstanceDisks.
4957

4958
  """
4959
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
4960
  _ShutdownInstanceDisks(lu, instance, disks=disks)
4961

    
4962

    
4963
def _ExpandCheckDisks(instance, disks):
4964
  """Return the instance disks selected by the disks list
4965

4966
  @type disks: list of L{objects.Disk} or None
4967
  @param disks: selected disks
4968
  @rtype: list of L{objects.Disk}
4969
  @return: selected instance disks to act on
4970

4971
  """
4972
  if disks is None:
4973
    return instance.disks
4974
  else:
4975
    if not set(disks).issubset(instance.disks):
4976
      raise errors.ProgrammerError("Can only act on disks belonging to the"
4977
                                   " target instance")
4978
    return disks
4979

    
4980

    
4981
def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
4982
  """Shutdown block devices of an instance.
4983

4984
  This does the shutdown on all nodes of the instance.
4985

4986
  If ignore_primary is false, errors on the primary node are not
  ignored.
4988

4989
  """
4990
  all_result = True
4991
  disks = _ExpandCheckDisks(instance, disks)
4992

    
4993
  for disk in disks:
4994
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
4995
      lu.cfg.SetDiskID(top_disk, node)
4996
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
4997
      msg = result.fail_msg
4998
      if msg:
4999
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
5000
                      disk.iv_name, node, msg)
5001
        if ((node == instance.primary_node and not ignore_primary) or
5002
            (node != instance.primary_node and not result.offline)):
5003
          all_result = False
5004
  return all_result
5005

    
5006

    
5007
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
5008
  """Checks if a node has enough free memory.
5009

5010
  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.
5014

5015
  @type lu: C{LogicalUnit}
5016
  @param lu: a logical unit from which we get configuration data
5017
  @type node: C{str}
5018
  @param node: the node to check
5019
  @type reason: C{str}
5020
  @param reason: string to use in the error message
5021
  @type requested: C{int}
5022
  @param requested: the amount of memory in MiB to check for
5023
  @type hypervisor_name: C{str}
5024
  @param hypervisor_name: the hypervisor to ask for memory stats
5025
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
5026
      we cannot check the node
5027

5028
  """
5029
  nodeinfo = lu.rpc.call_node_info([node], None, hypervisor_name)
5030
  nodeinfo[node].Raise("Can't get data from node %s" % node,
5031
                       prereq=True, ecode=errors.ECODE_ENVIRON)
5032
  free_mem = nodeinfo[node].payload.get('memory_free', None)
5033
  if not isinstance(free_mem, int):
5034
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
5035
                               " was '%s'" % (node, free_mem),
5036
                               errors.ECODE_ENVIRON)
5037
  if requested > free_mem:
5038
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
5039
                               " needed %s MiB, available %s MiB" %
5040
                               (node, reason, requested, free_mem),
5041
                               errors.ECODE_NORES)
5042

    
5043

    
5044
def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
5045
  """Checks if nodes have enough free disk space in the all VGs.
5046

5047
  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.
5051

5052
  @type lu: C{LogicalUnit}
5053
  @param lu: a logical unit from which we get configuration data
5054
  @type nodenames: C{list}
5055
  @param nodenames: the list of node names to check
5056
  @type req_sizes: C{dict}
5057
  @param req_sizes: the hash of vg and corresponding amount of disk in
5058
      MiB to check for
5059
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
5060
      or we cannot check the node
5061

5062
  """
5063
  for vg, req_size in req_sizes.items():
5064
    _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
5065

    
5066

    
5067
def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
5068
  """Checks if nodes have enough free disk space in the specified VG.
5069

5070
  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.
5074

5075
  @type lu: C{LogicalUnit}
5076
  @param lu: a logical unit from which we get configuration data
5077
  @type nodenames: C{list}
5078
  @param nodenames: the list of node names to check
5079
  @type vg: C{str}
5080
  @param vg: the volume group to check
5081
  @type requested: C{int}
5082
  @param requested: the amount of disk in MiB to check for
5083
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
5084
      or we cannot check the node
5085

5086
  """
5087
  nodeinfo = lu.rpc.call_node_info(nodenames, vg, None)
5088
  for node in nodenames:
5089
    info = nodeinfo[node]
5090
    info.Raise("Cannot get current information from node %s" % node,
5091
               prereq=True, ecode=errors.ECODE_ENVIRON)
5092
    vg_free = info.payload.get("vg_free", None)
5093
    if not isinstance(vg_free, int):
5094
      raise errors.OpPrereqError("Can't compute free disk space on node"
5095
                                 " %s for vg %s, result was '%s'" %
5096
                                 (node, vg, vg_free), errors.ECODE_ENVIRON)
5097
    if requested > vg_free:
5098
      raise errors.OpPrereqError("Not enough disk space on target node %s"
5099
                                 " vg %s: required %d MiB, available %d MiB" %
5100
                                 (node, vg, requested, vg_free),
5101
                                 errors.ECODE_NORES)
5102

    
5103

    
5104
class LUInstanceStartup(LogicalUnit):
5105
  """Starts an instance.
5106

5107
  """
5108
  HPATH = "instance-start"
5109
  HTYPE = constants.HTYPE_INSTANCE
5110
  REQ_BGL = False
5111

    
5112
  def CheckArguments(self):
5113
    # extra beparams
5114
    if self.op.beparams:
5115
      # fill the beparams dict
5116
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
5117

    
5118
  def ExpandNames(self):
5119
    self._ExpandAndLockInstance()
5120

    
5121
  def BuildHooksEnv(self):
5122
    """Build hooks env.
5123

5124
    This runs on master, primary and secondary nodes of the instance.
5125

5126
    """
5127
    env = {
5128
      "FORCE": self.op.force,
5129
      }
5130
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5131
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5132
    return env, nl, nl
5133

    
5134
  def CheckPrereq(self):
5135
    """Check prerequisites.
5136

5137
    This checks that the instance is in the cluster.
5138

5139
    """
5140
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5141
    assert self.instance is not None, \
5142
      "Cannot retrieve locked instance %s" % self.op.instance_name
5143

    
5144
    # extra hvparams
5145
    if self.op.hvparams:
5146
      # check hypervisor parameter syntax (locally)
5147
      cluster = self.cfg.GetClusterInfo()
5148
      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
5149
      filled_hvp = cluster.FillHV(instance)
5150
      filled_hvp.update(self.op.hvparams)
5151
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
5152
      hv_type.CheckParameterSyntax(filled_hvp)
5153
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
5154

    
5155
    self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
5156

    
5157
    if self.primary_offline and self.op.ignore_offline_nodes:
5158
      self.proc.LogWarning("Ignoring offline primary node")
5159

    
5160
      if self.op.hvparams or self.op.beparams:
5161
        self.proc.LogWarning("Overridden parameters are ignored")
5162
    else:
5163
      _CheckNodeOnline(self, instance.primary_node)
5164

    
5165
      bep = self.cfg.GetClusterInfo().FillBE(instance)
5166

    
5167
      # check bridges existence
5168
      _CheckInstanceBridgesExist(self, instance)
5169

    
5170
      remote_info = self.rpc.call_instance_info(instance.primary_node,
5171
                                                instance.name,
5172
                                                instance.hypervisor)
5173
      remote_info.Raise("Error checking node %s" % instance.primary_node,
5174
                        prereq=True, ecode=errors.ECODE_ENVIRON)
5175
      if not remote_info.payload: # not running already
5176
        _CheckNodeFreeMemory(self, instance.primary_node,
5177
                             "starting instance %s" % instance.name,
5178
                             bep[constants.BE_MEMORY], instance.hypervisor)
5179

    
5180
  def Exec(self, feedback_fn):
5181
    """Start the instance.
5182

5183
    """
5184
    instance = self.instance
5185
    force = self.op.force
5186

    
5187
    self.cfg.MarkInstanceUp(instance.name)
5188

    
5189
    if self.primary_offline:
5190
      assert self.op.ignore_offline_nodes
5191
      self.proc.LogInfo("Primary node offline, marked instance as started")
5192
    else:
5193
      node_current = instance.primary_node
5194

    
5195
      _StartInstanceDisks(self, instance, force)
5196

    
5197
      result = self.rpc.call_instance_start(node_current, instance,
5198
                                            self.op.hvparams, self.op.beparams)
5199
      msg = result.fail_msg
5200
      if msg:
5201
        _ShutdownInstanceDisks(self, instance)
5202
        raise errors.OpExecError("Could not start instance: %s" % msg)
5203

    
5204

    
5205
class LUInstanceReboot(LogicalUnit):
5206
  """Reboot an instance.
5207

5208
  """
5209
  HPATH = "instance-reboot"
5210
  HTYPE = constants.HTYPE_INSTANCE
5211
  REQ_BGL = False
5212

    
5213
  def ExpandNames(self):
5214
    self._ExpandAndLockInstance()
5215

    
5216
  def BuildHooksEnv(self):
5217
    """Build hooks env.
5218

5219
    This runs on master, primary and secondary nodes of the instance.
5220

5221
    """
5222
    env = {
5223
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
5224
      "REBOOT_TYPE": self.op.reboot_type,
5225
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5226
      }
5227
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5228
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5229
    return env, nl, nl
5230

    
5231
  def CheckPrereq(self):
5232
    """Check prerequisites.
5233

5234
    This checks that the instance is in the cluster.
5235

5236
    """
5237
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5238
    assert self.instance is not None, \
5239
      "Cannot retrieve locked instance %s" % self.op.instance_name
5240

    
5241
    _CheckNodeOnline(self, instance.primary_node)
5242

    
5243
    # check bridges existence
5244
    _CheckInstanceBridgesExist(self, instance)
5245

    
5246
  def Exec(self, feedback_fn):
5247
    """Reboot the instance.
5248

5249
    """
5250
    instance = self.instance
5251
    ignore_secondaries = self.op.ignore_secondaries
5252
    reboot_type = self.op.reboot_type
5253

    
5254
    remote_info = self.rpc.call_instance_info(instance.primary_node,
5255
                                              instance.name,
5256
                                              instance.hypervisor)
5257
    remote_info.Raise("Error checking node %s" % instance.primary_node)
5258
    instance_running = bool(remote_info.payload)
5259

    
5260
    node_current = instance.primary_node
5261

    
5262
    if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
5263
                                            constants.INSTANCE_REBOOT_HARD]:
5264
      for disk in instance.disks:
5265
        self.cfg.SetDiskID(disk, node_current)
5266
      result = self.rpc.call_instance_reboot(node_current, instance,
5267
                                             reboot_type,
5268
                                             self.op.shutdown_timeout)
5269
      result.Raise("Could not reboot instance")
5270
    else:
5271
      if instance_running:
5272
        result = self.rpc.call_instance_shutdown(node_current, instance,
5273
                                                 self.op.shutdown_timeout)
5274
        result.Raise("Could not shutdown instance for full reboot")
5275
        _ShutdownInstanceDisks(self, instance)
5276
      else:
5277
        self.LogInfo("Instance %s was already stopped, starting now",
5278
                     instance.name)
5279
      _StartInstanceDisks(self, instance, ignore_secondaries)
5280
      result = self.rpc.call_instance_start(node_current, instance, None, None)
5281
      msg = result.fail_msg
5282
      if msg:
5283
        _ShutdownInstanceDisks(self, instance)
5284
        raise errors.OpExecError("Could not start instance for"
5285
                                 " full reboot: %s" % msg)
5286

    
5287
    self.cfg.MarkInstanceUp(instance.name)
5288

    
5289

    
5290
class LUInstanceShutdown(LogicalUnit):
5291
  """Shutdown an instance.
5292

5293
  """
5294
  HPATH = "instance-stop"
5295
  HTYPE = constants.HTYPE_INSTANCE
5296
  REQ_BGL = False
5297

    
5298
  def ExpandNames(self):
5299
    self._ExpandAndLockInstance()
5300

    
5301
  def BuildHooksEnv(self):
5302
    """Build hooks env.
5303

5304
    This runs on master, primary and secondary nodes of the instance.
5305

5306
    """
5307
    env = _BuildInstanceHookEnvByObject(self, self.instance)
5308
    env["TIMEOUT"] = self.op.timeout
5309
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5310
    return env, nl, nl
5311

    
5312
  def CheckPrereq(self):
5313
    """Check prerequisites.
5314

5315
    This checks that the instance is in the cluster.
5316

5317
    """
5318
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5319
    assert self.instance is not None, \
5320
      "Cannot retrieve locked instance %s" % self.op.instance_name
5321

    
5322
    self.primary_offline = \
5323
      self.cfg.GetNodeInfo(self.instance.primary_node).offline
5324

    
5325
    if self.primary_offline and self.op.ignore_offline_nodes:
5326
      self.proc.LogWarning("Ignoring offline primary node")
5327
    else:
5328
      _CheckNodeOnline(self, self.instance.primary_node)
5329

    
5330
  def Exec(self, feedback_fn):
5331
    """Shutdown the instance.
5332

5333
    """
5334
    instance = self.instance
5335
    node_current = instance.primary_node
5336
    timeout = self.op.timeout
5337

    
5338
    self.cfg.MarkInstanceDown(instance.name)
5339

    
5340
    if self.primary_offline:
5341
      assert self.op.ignore_offline_nodes
5342
      self.proc.LogInfo("Primary node offline, marked instance as stopped")
5343
    else:
5344
      result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
5345
      msg = result.fail_msg
5346
      if msg:
5347
        self.proc.LogWarning("Could not shutdown instance: %s" % msg)
5348

    
5349
      _ShutdownInstanceDisks(self, instance)
5350

    
5351

    
5352
class LUInstanceReinstall(LogicalUnit):
5353
  """Reinstall an instance.
5354

5355
  """
5356
  HPATH = "instance-reinstall"
5357
  HTYPE = constants.HTYPE_INSTANCE
5358
  REQ_BGL = False
5359

    
5360
  def ExpandNames(self):
5361
    self._ExpandAndLockInstance()
5362

    
5363
  def BuildHooksEnv(self):
5364
    """Build hooks env.
5365

5366
    This runs on master, primary and secondary nodes of the instance.
5367

5368
    """
5369
    env = _BuildInstanceHookEnvByObject(self, self.instance)
5370
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5371
    return env, nl, nl
5372

    
5373
  def CheckPrereq(self):
5374
    """Check prerequisites.
5375

5376
    This checks that the instance is in the cluster and is not running.
5377

5378
    """
5379
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5380
    assert instance is not None, \
5381
      "Cannot retrieve locked instance %s" % self.op.instance_name
5382
    _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
5383
                     " offline, cannot reinstall")
5384
    for node in instance.secondary_nodes:
5385
      _CheckNodeOnline(self, node, "Instance secondary node offline,"
5386
                       " cannot reinstall")
5387

    
5388
    if instance.disk_template == constants.DT_DISKLESS:
5389
      raise errors.OpPrereqError("Instance '%s' has no disks" %
5390
                                 self.op.instance_name,
5391
                                 errors.ECODE_INVAL)
5392
    _CheckInstanceDown(self, instance, "cannot reinstall")
5393

    
5394
    if self.op.os_type is not None:
5395
      # OS verification
5396
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
5397
      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
5398
      instance_os = self.op.os_type
5399
    else:
5400
      instance_os = instance.os
5401

    
5402
    nodelist = list(instance.all_nodes)
5403

    
5404
    if self.op.osparams:
5405
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
5406
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
5407
      self.os_inst = i_osdict # the new dict (without defaults)
5408
    else:
5409
      self.os_inst = None
5410

    
5411
    self.instance = instance
5412

    
5413
  def Exec(self, feedback_fn):
5414
    """Reinstall the instance.
5415

5416
    """
5417
    inst = self.instance
5418

    
5419
    if self.op.os_type is not None:
5420
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
5421
      inst.os = self.op.os_type
5422
      # Write to configuration
5423
      self.cfg.Update(inst, feedback_fn)
5424

    
5425
    _StartInstanceDisks(self, inst, None)
5426
    try:
5427
      feedback_fn("Running the instance OS create scripts...")
5428
      # FIXME: pass debug option from opcode to backend
5429
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
5430
                                             self.op.debug_level,
5431
                                             osparams=self.os_inst)
5432
      result.Raise("Could not install OS for instance %s on node %s" %
5433
                   (inst.name, inst.primary_node))
5434
    finally:
5435
      _ShutdownInstanceDisks(self, inst)
5436

    
5437

    
5438
class LUInstanceRecreateDisks(LogicalUnit):
5439
  """Recreate an instance's missing disks.
5440

5441
  """
5442
  HPATH = "instance-recreate-disks"
5443
  HTYPE = constants.HTYPE_INSTANCE
5444
  REQ_BGL = False
5445

    
5446
  def ExpandNames(self):
5447
    self._ExpandAndLockInstance()
5448

    
5449
  def BuildHooksEnv(self):
5450
    """Build hooks env.
5451

5452
    This runs on master, primary and secondary nodes of the instance.
5453

5454
    """
5455
    env = _BuildInstanceHookEnvByObject(self, self.instance)
5456
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5457
    return env, nl, nl
5458

    
5459
  def CheckPrereq(self):
5460
    """Check prerequisites.
5461

5462
    This checks that the instance is in the cluster and is not running.
5463

5464
    """
5465
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5466
    assert instance is not None, \
5467
      "Cannot retrieve locked instance %s" % self.op.instance_name
5468
    _CheckNodeOnline(self, instance.primary_node)
5469

    
5470
    if instance.disk_template == constants.DT_DISKLESS:
5471
      raise errors.OpPrereqError("Instance '%s' has no disks" %
5472
                                 self.op.instance_name, errors.ECODE_INVAL)
5473
    _CheckInstanceDown(self, instance, "cannot recreate disks")
5474

    
5475
    if not self.op.disks:
5476
      self.op.disks = range(len(instance.disks))
5477
    else:
5478
      for idx in self.op.disks:
5479
        if idx >= len(instance.disks):
5480
          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
5481
                                     errors.ECODE_INVAL)
5482

    
5483
    self.instance = instance
5484

    
5485
  def Exec(self, feedback_fn):
5486
    """Recreate the disks.
5487

5488
    """
5489
    to_skip = []
5490
    for idx, _ in enumerate(self.instance.disks):
5491
      if idx not in self.op.disks: # disk idx has not been passed in
5492
        to_skip.append(idx)
5493
        continue
5494

    
5495
    _CreateDisks(self, self.instance, to_skip=to_skip)
5496

    
5497

    
5498
class LUInstanceRename(LogicalUnit):
5499
  """Rename an instance.
5500

5501
  """
5502
  HPATH = "instance-rename"
5503
  HTYPE = constants.HTYPE_INSTANCE
5504

    
5505
  def CheckArguments(self):
5506
    """Check arguments.
5507

5508
    """
5509
    if self.op.ip_check and not self.op.name_check:
5510
      # TODO: make the ip check more flexible and not depend on the name check
5511
      raise errors.OpPrereqError("Cannot do ip check without a name check",
5512
                                 errors.ECODE_INVAL)
5513

    
5514
  def BuildHooksEnv(self):
5515
    """Build hooks env.
5516

5517
    This runs on master, primary and secondary nodes of the instance.
5518

5519
    """
5520
    env = _BuildInstanceHookEnvByObject(self, self.instance)
5521
    env["INSTANCE_NEW_NAME"] = self.op.new_name
5522
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5523
    return env, nl, nl
5524

    
5525
  def CheckPrereq(self):
5526
    """Check prerequisites.
5527

5528
    This checks that the instance is in the cluster and is not running.
5529

5530
    """
5531
    self.op.instance_name = _ExpandInstanceName(self.cfg,
5532
                                                self.op.instance_name)
5533
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5534
    assert instance is not None
5535
    _CheckNodeOnline(self, instance.primary_node)
5536
    _CheckInstanceDown(self, instance, "cannot rename")
5537
    self.instance = instance
5538

    
5539
    new_name = self.op.new_name
5540
    if self.op.name_check:
5541
      hostname = netutils.GetHostname(name=new_name)
5542
      self.LogInfo("Resolved given name '%s' to '%s'", new_name,
5543
                   hostname.name)
5544
      if not utils.MatchNameComponent(self.op.new_name, [hostname.name]):
5545
        raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
5546
                                    " same as given hostname '%s'") %
5547
                                    (hostname.name, self.op.new_name),
5548
                                    errors.ECODE_INVAL)
5549
      new_name = self.op.new_name = hostname.name
5550
      if (self.op.ip_check and
5551
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
5552
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
5553
                                   (hostname.ip, new_name),
5554
                                   errors.ECODE_NOTUNIQUE)
5555

    
5556
    instance_list = self.cfg.GetInstanceList()
5557
    if new_name in instance_list and new_name != instance.name:
5558
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
5559
                                 new_name, errors.ECODE_EXISTS)
5560

    
5561
  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    rename_file_storage = False
    if (inst.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE) and
        self.op.new_name != inst.name):
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)

    return inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return env, nl, nl_post

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)


def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
  """Utility function to remove an instance.

  """
  logging.info("Removing block devices for instance %s", instance.name)

  if not _RemoveDisks(lu, instance):
    if not ignore_failures:
      raise errors.OpExecError("Can't remove instance's disks")
    feedback_fn("Warning: can't remove instance's disks")

  logging.info("Removing instance %s out of cluster config", instance.name)

  lu.cfg.RemoveInstance(instance.name)

  assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
    "Instance lock removal conflict"

  # Remove lock for the instance
  lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name


class LUInstanceQuery(NoHooksLU):
  """Logical unit for querying instances.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
                             self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.iq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.iq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.iq.OldStyleQuery(self)


class LUInstanceFailover(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.iallocator = getattr(self.op, "iallocator", None)
    self.target_node = getattr(self.op, "target_node", None)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if self.op.target_node is not None:
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
      if instance.disk_template in constants.DTS_EXT_MIRROR:
        if self.op.target_node is None:
          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        else:
          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
                                                   self.op.target_node]
        del self.recalculate_locks[locking.LEVEL_NODE]
      else:
        self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self.instance
    source_node = instance.primary_node
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      "OLD_PRIMARY": source_node,
      "NEW_PRIMARY": self.op.target_node,
      }

    if instance.disk_template in constants.DTS_INT_MIRROR:
      env["OLD_SECONDARY"] = instance.secondary_nodes[0]
      env["NEW_SECONDARY"] = source_node
    else:
      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""

    env.update(_BuildInstanceHookEnvByObject(self, instance))
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    nl_post = list(nl)
    nl_post.append(source_node)
    return env, nl, nl_post

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_MIRRORED:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " mirrored, cannot failover.",
                                 errors.ECODE_STATE)

    if instance.disk_template in constants.DTS_EXT_MIRROR:
      _CheckIAllocatorOrNode(self, "iallocator", "target_node")
      if self.op.iallocator:
        self._RunAllocator()
        # Release all unnecessary node locks
        nodes_keep = [instance.primary_node, self.op.target_node]
        nodes_rel = [node for node in self.acquired_locks[locking.LEVEL_NODE]
                     if node not in nodes_keep]
        self.context.glm.release(locking.LEVEL_NODE, nodes_rel)
        self.acquired_locks[locking.LEVEL_NODE] = nodes_keep

      # self.op.target_node is already populated, either directly or by the
      # iallocator run
      target_node = self.op.target_node

    else:
      secondary_nodes = instance.secondary_nodes
      if not secondary_nodes:
        raise errors.ConfigurationError("No secondary node but using"
                                        " %s disk template" %
                                        instance.disk_template)
      target_node = secondary_nodes[0]

      if self.op.iallocator or (self.op.target_node and
                                self.op.target_node != target_node):
        raise errors.OpPrereqError("Instances with disk template %s cannot"
                                   " be failed over to arbitrary nodes"
                                   " (neither an iallocator nor a target"
                                   " node can be passed)" %
                                   instance.disk_template, errors.ECODE_INVAL)
    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)

    # Save target_node so that we can use it in BuildHooksEnv
    self.op.target_node = target_node

    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance
    primary_node = self.cfg.GetNodeInfo(instance.primary_node)

    source_node = instance.primary_node
    target_node = self.op.target_node

    if instance.admin_up:
      feedback_fn("* checking disk consistency between source and target")
      for dev in instance.disks:
        # for drbd, these are drbd over lvm
        if not _CheckDiskConsistency(self, dev, target_node, False):
          if not self.op.ignore_consistency:
            raise errors.OpExecError("Disk %s is degraded on target node,"
                                     " aborting failover." % dev.iv_name)
    else:
      feedback_fn("* not checking disk consistency as instance is not running")

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency or primary_node.offline:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance, feedback_fn)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    ial = IAllocator(self.cfg, self.rpc,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.instance.name,
                     # TODO See why hail breaks with a single node below
                     relocate_from=[self.instance.primary_node,
                                    self.instance.primary_node],
                     )

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)
    if len(ial.result) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.result),
                                  ial.required_nodes), errors.ECODE_FAULT)
    self.op.target_node = ial.result[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.instance.name, self.op.iallocator,
                 utils.CommaJoin(ial.result))


class LUInstanceMigrate(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if self.op.target_node is not None:
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self._migrater = TLMigrateInstance(self, self.op.instance_name,
                                       self.op.cleanup, self.op.iallocator,
                                       self.op.target_node)
    self.tasklets = [self._migrater]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
      if instance.disk_template in constants.DTS_EXT_MIRROR:
        if self.op.target_node is None:
          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        else:
          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
                                                   self.op.target_node]
        del self.recalculate_locks[locking.LEVEL_NODE]
      else:
        self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self._migrater.instance
    source_node = instance.primary_node
    target_node = self._migrater.target_node
    env = _BuildInstanceHookEnvByObject(self, instance)
    env["MIGRATE_LIVE"] = self._migrater.live
    env["MIGRATE_CLEANUP"] = self.op.cleanup
    env.update({
        "OLD_PRIMARY": source_node,
        "NEW_PRIMARY": target_node,
        })

    if instance.disk_template in constants.DTS_INT_MIRROR:
      env["OLD_SECONDARY"] = target_node
      env["NEW_SECONDARY"] = source_node
    else:
      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None

    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    nl_post = list(nl)
    nl_post.append(source_node)
    return env, nl, nl_post


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
                                       self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    _CheckNodeVmCapable(self, target_node)

    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      _CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, disk,
                                               instance.name, True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node, disk,
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    instance.primary_node = target_node
    self.cfg.Update(instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    _RemoveDisks(self, instance, target_node=source_node)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUNodeMigrate(LogicalUnit):
  """Migrate all instances from a node.

  """
  HPATH = "node-migrate"
  HTYPE = constants.HTYPE_NODE
  REQ_BGL = False

  def CheckArguments(self):
    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")

  def ExpandNames(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    self.needed_locks = {}

    # Create migration tasklets for all instances on this node
    names = []
    tasklets = []

    self.lock_all_nodes = False

    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
      logging.debug("Migrating instance %s", inst.name)
      names.append(inst.name)

      tasklets.append(TLMigrateInstance(self, inst.name, False,
                                        self.op.iallocator, None))

      if inst.disk_template in constants.DTS_EXT_MIRROR:
        # We need to lock all nodes, as the iallocator will choose the
        # destination nodes afterwards
        self.lock_all_nodes = True

    self.tasklets = tasklets

    # Declare node locks
    if self.lock_all_nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = [self.op.node_name]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

    # Declare instance locks
    self.needed_locks[locking.LEVEL_INSTANCE] = names

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and not self.lock_all_nodes:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "NODE_NAME": self.op.node_name,
      }

    nl = [self.cfg.GetMasterNode()]

    return (env, nl, nl)


class TLMigrateInstance(Tasklet):
  """Tasklet class for instance migration.

  @type live: boolean
  @ivar live: whether the migration will be done live or non-live;
      this variable is initialized only after CheckPrereq has run

  """
  def __init__(self, lu, instance_name, cleanup,
               iallocator=None, target_node=None):
    """Initializes this class.

    """
    Tasklet.__init__(self, lu)

    # Parameters
    self.instance_name = instance_name
    self.cleanup = cleanup
    self.live = False # will be overridden later
    self.iallocator = iallocator
    self.target_node = target_node

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
    instance = self.cfg.GetInstanceInfo(instance_name)
    assert instance is not None
    self.instance = instance

    if instance.disk_template not in constants.DTS_MIRRORED:
      raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
                                 " migrations" % instance.disk_template,
                                 errors.ECODE_STATE)

    if instance.disk_template in constants.DTS_EXT_MIRROR:
      _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")

      if self.iallocator:
        self._RunAllocator()

      # self.target_node is already populated, either directly or by the
      # iallocator run
      target_node = self.target_node

      if len(self.lu.tasklets) == 1:
        # It is safe to remove locks only when we're the only tasklet in the LU
        nodes_keep = [instance.primary_node, self.target_node]
        nodes_rel = [node for node in self.lu.acquired_locks[locking.LEVEL_NODE]
                     if node not in nodes_keep]
        self.lu.context.glm.release(locking.LEVEL_NODE, nodes_rel)
        self.lu.acquired_locks[locking.LEVEL_NODE] = nodes_keep

    else:
      secondary_nodes = instance.secondary_nodes
      if not secondary_nodes:
        raise errors.ConfigurationError("No secondary node but using"
                                        " %s disk template" %
                                        instance.disk_template)
      target_node = secondary_nodes[0]
      if self.lu.op.iallocator or (self.lu.op.target_node and
                                   self.lu.op.target_node != target_node):
        raise errors.OpPrereqError("Instances with disk template %s cannot"
                                   " be migrated over to arbitrary nodes"
                                   " (neither an iallocator nor a target"
                                   " node can be passed)" %
                                   instance.disk_template, errors.ECODE_INVAL)

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    _CheckInstanceBridgesExist(self.lu, instance, node=target_node)

    if not self.cleanup:
      _CheckNodeNotDrained(self.lu, target_node)
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      result.Raise("Can't migrate, please use failover",
                   prereq=True, ecode=errors.ECODE_STATE)

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    ial = IAllocator(self.cfg, self.rpc,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.instance_name,
                     # TODO See why hail breaks with a single node below
                     relocate_from=[self.instance.primary_node,
                                    self.instance.primary_node],
                     )

    ial.Run(self.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.iallocator, ial.info),
                                 errors.ECODE_NORES)
    if len(ial.result) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.iallocator, len(ial.result),
                                  ial.required_nodes), errors.ECODE_FAULT)
    self.target_node = ial.result[0]
    self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.instance_name, self.iallocator,
                 utils.CommaJoin(ial.result))

    if self.lu.op.live is not None and self.lu.op.mode is not None:
      raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
                                 " parameters are accepted",
                                 errors.ECODE_INVAL)
    if self.lu.op.live is not None:
      if self.lu.op.live:
        self.lu.op.mode = constants.HT_MIGRATION_LIVE
      else:
        self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
      # reset the 'live' parameter to None so that repeated
      # invocations of CheckPrereq do not raise an exception
      self.lu.op.live = None
    elif self.lu.op.mode is None:
      # read the default value from the hypervisor
      i_hv = self.cfg.GetClusterInfo().FillHV(self.instance, skip_globals=False)
      self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]

    self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
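
  # Note on the mode selection above (illustrative): an explicit 'live' or
  # 'mode' opcode parameter wins; if neither is given, the hypervisor's
  # migration-mode default (HV_MIGRATION_MODE) decides, and self.live ends
  # up True only when the resulting mode is HT_MIGRATION_LIVE.
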
  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        nres.Raise("Cannot resync disks on node %s" % node)
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    result.Raise("Cannot change disk to secondary on node %s" % node)

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      nres.Raise("Cannot disconnect disks node %s" % node)

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      nres.Raise("Cannot change disks config on node %s" % node)

  def _ExecCleanup(self):
    """Try to clean up after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise("Can't contact node %s" % node)

    runningon_source = instance.name in ins_l[source_node].payload
    runningon_target = instance.name in ins_l[target_node].payload

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance, self.feedback_fn)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    if instance.disk_template in constants.DTS_INT_MIRROR:
      self._EnsureSecondary(demoted_node)
      try:
        self._WaitUntilSync()
      except errors.OpExecError:
        # we ignore errors here, since if the device is standalone, it
        # won't be able to sync
        pass
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    """
    target_node = self.target_node
    if self.instance.disk_template in constants.DTS_EXT_MIRROR:
      return

    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.lu.LogWarning("Migration failed and I can't reconnect the"
                         " drives: error '%s'\n"
                         "Please look and recover the instance status" %
                         str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.fail_msg
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s",
                    target_node, abort_msg)
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self.lu, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.fail_msg
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
      # Then switch the disks to master/master mode
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(True)
      self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.fail_msg
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Pre-migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.live)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance, self.feedback_fn)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s", msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
      self._EnsureSecondary(source_node)
      self._WaitUntilSync()
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()

    self.feedback_fn("* done")

  def Exec(self, feedback_fn):
    """Perform the migration.

    """
    feedback_fn("Migrating instance %s" % self.instance.name)

    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node

    # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
    if self.instance.disk_template in constants.DTS_INT_MIRROR:
      self.target_node = self.instance.secondary_nodes[0]
      # Otherwise self.target_node has been populated either
      # directly, or through an iallocator.

    self.all_nodes = [self.source_node, self.target_node]
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }

    if self.cleanup:
      return self._ExecCleanup()
    else:
      return self._ExecMigration()


def _CreateBlockDev(lu, node, instance, device, force_create,
                    info, force_open):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device which has
      the CreateOnSecondary() attribute
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as an LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  if device.CreateOnSecondary():
    force_create = True

  if device.children:
    for child in device.children:
      _CreateBlockDev(lu, node, instance, child, force_create,
                      info, force_open)

  if not force_create:
    return

  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
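
# For example, for a DRBD8 disk the recursion above first descends into
# device.children (the data and metadata LVs) and creates those, and only
# then creates the DRBD device on top of them; force_create is switched on
# as soon as a device reports CreateOnSecondary(), so such subtrees are also
# created on the secondary node.

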
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created in advance.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as an LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  lu.cfg.SetDiskID(device, node)
  result = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, force_open, info)
  result.Raise("Can't create block device %s on"
               " node %s for instance %s" % (device, node, instance.name))
  if device.physical_id is None:
    device.physical_id = result.payload


def _GenerateUniqueNames(lu, exts):
  """Generate a suitable LV name.

  This will generate a logical volume name for the given instance.

  """
  results = []
  for val in exts:
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
    results.append("%s%s" % (new_id, val))
  return results
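
# Illustratively, for exts like [".disk0", ".disk1"] this returns names of
# the form "<unique-id>.disk0" and "<unique-id>.disk1", where each
# <unique-id> is a freshly generated identifier from the cluster
# configuration.

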
def _GenerateDRBD8Branch(lu, primary, secondary, size, vgname, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  port = lu.cfg.AllocatePort()
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                          logical_id=(primary, secondary, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name)
  return drbd_dev
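
# The tree built above is a single LD_DRBD8 device whose logical_id carries
# the two node names, the allocated DRBD port, both minors and the shared
# secret, with two LD_LV children: the data volume of the requested size and
# a small (128, assuming the usual MiB-based disk sizes) metadata volume.

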
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index, feedback_fn):
  """Generate the entire disk layout for a given template type.

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      vg = disk.get("vg", vgname)
      feedback_fn("* disk %i, vg %s, name %s" % (idx, vg, names[idx]))
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vg, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      vg = disk.get("vg", vgname)
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], vg, names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    opcodes.RequireFileStorage()

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         disk_index)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_SHARED_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    opcodes.RequireSharedFileStorage()

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         disk_index)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_BLOCK:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_BLOCKDEV, size=disk["size"],
                              logical_id=(constants.BLOCKDEV_DRIVER_MANUAL,
                                          disk["adopt"]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)

  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
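
# As an illustration of the layout above: a two-disk DRBD8 instance gets LV
# names of the form "<id0>.disk0_data"/"<id0>.disk0_meta" and
# "<id1>.disk1_data"/"<id1>.disk1_meta" plus two DRBD minors per disk, while
# a plain instance gets one LD_LV per disk with iv_name "disk/<index>".

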
def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name
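
# E.g. for an instance named "instance1.example.com" (a made-up name) the
# returned text would be "originstname+instance1.example.com", which ends up
# attached to the instance's volumes as metadata (an LVM tag for LV-based
# disks).

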
def _CalcEta(time_taken, written, total_size):
  """Calculates the ETA based on size written and total size.

  @param time_taken: The time taken so far
  @param written: amount written so far
  @param total_size: The total size of data to be written
  @return: The remaining time in seconds

  """
  avg_time = time_taken / float(written)
  return (total_size - written) * avg_time
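
# Worked example for the formula above: if 512 MiB of a 2048 MiB total were
# written in 60 seconds, avg_time is 60 / 512.0 seconds per MiB, so the
# remaining 1536 MiB give an ETA of (2048 - 512) * 60 / 512.0 = 180 seconds.

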
def _WipeDisks(lu, instance):
6901
  """Wipes instance disks.
6902

6903
  @type lu: L{LogicalUnit}
6904
  @param lu: the logical unit on whose behalf we execute
6905
  @type instance: L{objects.Instance}
6906
  @param instance: the instance whose disks we should create
6907
  @return: the success of the wipe
6908

6909
  """
6910
  node = instance.primary_node
6911

    
6912
  for device in instance.disks:
6913
    lu.cfg.SetDiskID(device, node)
6914

    
6915
  logging.info("Pause sync of instance %s disks", instance.name)
6916
  result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)
6917

    
6918
  for idx, success in enumerate(result.payload):
6919
    if not success:
6920
      logging.warn("pause-sync of instance %s for disks %d failed",
6921
                   instance.name, idx)
6922

    
6923
  try:
6924
    for idx, device in enumerate(instance.disks):
6925
      lu.LogInfo("* Wiping disk %d", idx)
6926
      logging.info("Wiping disk %d for instance %s, node %s",
6927
                   idx, instance.name, node)
6928

    
6929
      # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
6930
      # MAX_WIPE_CHUNK at max
6931
      wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
6932
                            constants.MIN_WIPE_CHUNK_PERCENT)
6933

    
6934
      offset = 0
6935
      size = device.size
6936
      last_output = 0
6937
      start_time = time.time()
6938

    
6939
      while offset < size:
6940
        wipe_size = min(wipe_chunk_size, size - offset)
6941
        result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
6942
        result.Raise("Could not wipe disk %d at offset %d for size %d" %
6943
                     (idx, offset, wipe_size))
6944
        now = time.time()
6945
        offset += wipe_size
6946
        if now - last_output >= 60:
6947
          eta = _CalcEta(now - start_time, offset, size)
6948
          lu.LogInfo(" - done: %.1f%% ETA: %s" %
6949
                     (offset / float(size) * 100, utils.FormatSeconds(eta)))
6950
          last_output = now
6951
  finally:
6952
    logging.info("Resume sync of instance %s disks", instance.name)
6953

    
6954
    result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, False)
6955

    
6956
    for idx, success in enumerate(result.payload):
6957
      if not success:
6958
        lu.LogWarning("Warning: Resume sync of disk %d failed. Please have a"
6959
                      " look at the status and troubleshoot the issue.", idx)
6960
        logging.warn("resume-sync of instance %s for disks %d failed",
6961
                     instance.name, idx)
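
# Chunk-size example for the computation above (hypothetical sizes): for a
# 20480 MB disk with MIN_WIPE_CHUNK_PERCENT of 10, the candidate chunk is
# 20480 / 100.0 * 10 == 2048 MB; the value actually passed to
# call_blockdev_wipe per step is the smaller of that and MAX_WIPE_CHUNK.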


def _CreateDisks(lu, instance, to_skip=None, target_node=None):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @type to_skip: list
  @param to_skip: list of indices to skip
  @type target_node: string
  @param target_node: if passed, overrides the target node for creation
  @rtype: boolean
  @return: the success of the creation

  """
  info = _GetInstanceInfoText(instance)
  if target_node is None:
    pnode = instance.primary_node
    all_nodes = instance.all_nodes
  else:
    pnode = target_node
    all_nodes = [pnode]

  if instance.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    result.Raise("Failed to create directory '%s' on"
                 " node %s" % (file_storage_dir, pnode))

  # Note: this needs to be kept in sync with adding of disks in
  # LUInstanceSetParams
  for idx, device in enumerate(instance.disks):
    if to_skip and idx in to_skip:
      continue
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    for node in all_nodes:
      f_create = node == pnode
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)


def _RemoveDisks(lu, instance, target_node=None):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type target_node: string
  @param target_node: used to override the node on which to remove the disks
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  for device in instance.disks:
    if target_node:
      edata = [(target_node, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node, disk in edata:
      lu.cfg.SetDiskID(disk, node)
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
      if msg:
        lu.LogWarning("Could not remove block device %s on node %s,"
                      " continuing anyway: %s", device.iv_name, node, msg)
        all_result = False

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if target_node:
      tgt = target_node
    else:
      tgt = instance.primary_node
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, instance.primary_node, result.fail_msg)
      all_result = False

  return all_result


def _ComputeDiskSizePerVG(disk_template, disks):
  """Compute disk size requirements in the volume group

  """
  def _compute(disks, payload):
    """Universal algorithm

    """
    vgs = {}
    for disk in disks:
      vgs[disk["vg"]] = vgs.get(disk["vg"], 0) + disk["size"] + payload

    return vgs

  # Required free disk space as a function of disk and swap space
  req_size_dict = {
    constants.DT_DISKLESS: {},
    constants.DT_PLAIN: _compute(disks, 0),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: _compute(disks, 128),
    constants.DT_FILE: {},
    constants.DT_SHARED_FILE: {},
  }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return req_size_dict[disk_template]
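
# Illustrative result (hypothetical volume group name "xenvg"): for
# disks = [{"vg": "xenvg", "size": 1024}, {"vg": "xenvg", "size": 2048}],
# _ComputeDiskSizePerVG(constants.DT_PLAIN, disks) yields {"xenvg": 3072},
# while DT_DRBD8 adds 128 MB of metadata per disk and yields {"xenvg": 3328}.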


def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group

  """
  # Required free disk space as a function of disk and swap space
  req_size_dict = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: sum(d["size"] for d in disks),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
    constants.DT_FILE: None,
    constants.DT_SHARED_FILE: 0,
    constants.DT_BLOCK: 0,
  }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return req_size_dict[disk_template]
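
# Illustrative result (hypothetical sizes): for disks of 1024 MB and 2048 MB,
# _ComputeDiskSize(constants.DT_PLAIN, disks) == 3072 and
# _ComputeDiskSize(constants.DT_DRBD8, disks) == 3328 (128 MB of DRBD
# metadata per disk); DT_DISKLESS and DT_FILE return None.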


def _FilterVmNodes(lu, nodenames):
  """Filters out non-vm_capable nodes from a list.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @rtype: list
  @return: the list of vm-capable nodes

  """
  vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
  return [name for name in nodenames if name not in vm_nodes]
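
# Illustrative example (hypothetical node names): if "node2" is the only
# node without the vm_capable flag, _FilterVmNodes(lu, ["node1", "node2",
# "node3"]) returns ["node1", "node3"].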


def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstracts the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  nodenames = _FilterVmNodes(lu, nodenames)
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
                                                  hvname,
                                                  hvparams)
  for node in nodenames:
    info = hvinfo[node]
    if info.offline:
      continue
    info.Raise("Hypervisor parameter validation failed on node %s" % node)


def _CheckOSParams(lu, required, nodenames, osname, osparams):
  """OS parameters validation.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type required: boolean
  @param required: whether the validation should fail if the OS is not
      found
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type osname: string
  @param osname: the name of the OS we should use
  @type osparams: dict
  @param osparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  nodenames = _FilterVmNodes(lu, nodenames)
  result = lu.rpc.call_os_validate(required, nodenames, osname,
                                   [constants.OS_VALIDATE_PARAMETERS],
                                   osparams)
  for node, nres in result.items():
    # we don't check for offline cases since this should be run only
    # against the master node and/or an instance's nodes
    nres.Raise("OS Parameters validation failed on node %s" % node)
    if not nres.payload:
      lu.LogInfo("OS %s not found on node %s, validation skipped",
                 osname, node)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False
7191

    
7192
  def CheckArguments(self):
7193
    """Check arguments.
7194

7195
    """
7196
    # do not require name_check to ease forward/backward compatibility
7197
    # for tools
7198
    if self.op.no_install and self.op.start:
7199
      self.LogInfo("No-installation mode selected, disabling startup")
7200
      self.op.start = False
7201
    # validate/normalize the instance name
7202
    self.op.instance_name = \
7203
      netutils.Hostname.GetNormalizedName(self.op.instance_name)
7204

    
7205
    if self.op.ip_check and not self.op.name_check:
7206
      # TODO: make the ip check more flexible and not depend on the name check
7207
      raise errors.OpPrereqError("Cannot do ip check without a name check",
7208
                                 errors.ECODE_INVAL)
7209

    
7210
    # check nics' parameter names
7211
    for nic in self.op.nics:
7212
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
7213

    
7214
    # check disks. parameter names and consistent adopt/no-adopt strategy
7215
    has_adopt = has_no_adopt = False
7216
    for disk in self.op.disks:
7217
      utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
7218
      if "adopt" in disk:
7219
        has_adopt = True
7220
      else:
7221
        has_no_adopt = True
7222
    if has_adopt and has_no_adopt:
7223
      raise errors.OpPrereqError("Either all disks are adopted or none is",
7224
                                 errors.ECODE_INVAL)
7225
    if has_adopt:
7226
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
7227
        raise errors.OpPrereqError("Disk adoption is not supported for the"
7228
                                   " '%s' disk template" %
7229
                                   self.op.disk_template,
7230
                                   errors.ECODE_INVAL)
7231
      if self.op.iallocator is not None:
7232
        raise errors.OpPrereqError("Disk adoption not allowed with an"
7233
                                   " iallocator script", errors.ECODE_INVAL)
7234
      if self.op.mode == constants.INSTANCE_IMPORT:
7235
        raise errors.OpPrereqError("Disk adoption not allowed for"
7236
                                   " instance import", errors.ECODE_INVAL)
7237
    else:
7238
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
7239
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
7240
                                   " but no 'adopt' parameter given" %
7241
                                   self.op.disk_template,
7242
                                   errors.ECODE_INVAL)
7243

    
7244
    self.adopt_disks = has_adopt
7245

    
7246
    # instance name verification
7247
    if self.op.name_check:
7248
      self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
7249
      self.op.instance_name = self.hostname1.name
7250
      # used in CheckPrereq for ip ping check
7251
      self.check_ip = self.hostname1.ip
7252
    else:
7253
      self.check_ip = None
7254

    
7255
    # file storage checks
7256
    if (self.op.file_driver and
7257
        not self.op.file_driver in constants.FILE_DRIVER):
7258
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
7259
                                 self.op.file_driver, errors.ECODE_INVAL)
7260

    
7261
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
7262
      raise errors.OpPrereqError("File storage directory path not absolute",
7263
                                 errors.ECODE_INVAL)
7264

    
7265
    ### Node/iallocator related checks
7266
    _CheckIAllocatorOrNode(self, "iallocator", "pnode")
7267

    
7268
    if self.op.pnode is not None:
7269
      if self.op.disk_template in constants.DTS_INT_MIRROR:
7270
        if self.op.snode is None:
7271
          raise errors.OpPrereqError("The networked disk templates need"
7272
                                     " a mirror node", errors.ECODE_INVAL)
7273
      elif self.op.snode:
7274
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
7275
                        " template")
7276
        self.op.snode = None
7277

    
7278
    self._cds = _GetClusterDomainSecret()
7279

    
7280
    if self.op.mode == constants.INSTANCE_IMPORT:
7281
      # On import force_variant must be True, because if we forced it at
7282
      # initial install, our only chance when importing it back is that it
7283
      # works again!
7284
      self.op.force_variant = True
7285

    
7286
      if self.op.no_install:
7287
        self.LogInfo("No-installation mode has no effect during import")
7288

    
7289
    elif self.op.mode == constants.INSTANCE_CREATE:
7290
      if self.op.os_type is None:
7291
        raise errors.OpPrereqError("No guest OS specified",
7292
                                   errors.ECODE_INVAL)
7293
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
7294
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
7295
                                   " installation" % self.op.os_type,
7296
                                   errors.ECODE_STATE)
7297
      if self.op.disk_template is None:
7298
        raise errors.OpPrereqError("No disk template specified",
7299
                                   errors.ECODE_INVAL)
7300

    
7301
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
7302
      # Check handshake to ensure both clusters have the same domain secret
7303
      src_handshake = self.op.source_handshake
7304
      if not src_handshake:
7305
        raise errors.OpPrereqError("Missing source handshake",
7306
                                   errors.ECODE_INVAL)
7307

    
7308
      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
7309
                                                           src_handshake)
7310
      if errmsg:
7311
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
7312
                                   errors.ECODE_INVAL)
7313

    
7314
      # Load and check source CA
7315
      self.source_x509_ca_pem = self.op.source_x509_ca
7316
      if not self.source_x509_ca_pem:
7317
        raise errors.OpPrereqError("Missing source X509 CA",
7318
                                   errors.ECODE_INVAL)
7319

    
7320
      try:
7321
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
7322
                                                    self._cds)
7323
      except OpenSSL.crypto.Error, err:
7324
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
7325
                                   (err, ), errors.ECODE_INVAL)
7326

    
7327
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
7328
      if errcode is not None:
7329
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
7330
                                   errors.ECODE_INVAL)
7331

    
7332
      self.source_x509_ca = cert
7333

    
7334
      src_instance_name = self.op.source_instance_name
7335
      if not src_instance_name:
7336
        raise errors.OpPrereqError("Missing source instance name",
7337
                                   errors.ECODE_INVAL)
7338

    
7339
      self.source_instance_name = \
7340
          netutils.GetHostname(name=src_instance_name).name
7341

    
7342
    else:
7343
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
7344
                                 self.op.mode, errors.ECODE_INVAL)
7345

    
7346
  def ExpandNames(self):
7347
    """ExpandNames for CreateInstance.
7348

7349
    Figure out the right locks for instance creation.
7350

7351
    """
7352
    self.needed_locks = {}
7353

    
7354
    instance_name = self.op.instance_name
7355
    # this is just a preventive check, but someone might still add this
7356
    # instance in the meantime, and creation will fail at lock-add time
7357
    if instance_name in self.cfg.GetInstanceList():
7358
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
7359
                                 instance_name, errors.ECODE_EXISTS)
7360

    
7361
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
7362

    
7363
    if self.op.iallocator:
7364
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7365
    else:
7366
      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
7367
      nodelist = [self.op.pnode]
7368
      if self.op.snode is not None:
7369
        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
7370
        nodelist.append(self.op.snode)
7371
      self.needed_locks[locking.LEVEL_NODE] = nodelist
7372

    
7373
    # in case of import lock the source node too
7374
    if self.op.mode == constants.INSTANCE_IMPORT:
7375
      src_node = self.op.src_node
7376
      src_path = self.op.src_path
7377

    
7378
      if src_path is None:
7379
        self.op.src_path = src_path = self.op.instance_name
7380

    
7381
      if src_node is None:
7382
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7383
        self.op.src_node = None
7384
        if os.path.isabs(src_path):
7385
          raise errors.OpPrereqError("Importing an instance from an absolute"
7386
                                     " path requires a source node option.",
7387
                                     errors.ECODE_INVAL)
7388
      else:
7389
        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
7390
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
7391
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
7392
        if not os.path.isabs(src_path):
7393
          self.op.src_path = src_path = \
7394
            utils.PathJoin(constants.EXPORT_DIR, src_path)
7395

    
7396
  def _RunAllocator(self):
7397
    """Run the allocator based on input opcode.
7398

7399
    """
7400
    nics = [n.ToDict() for n in self.nics]
7401
    ial = IAllocator(self.cfg, self.rpc,
7402
                     mode=constants.IALLOCATOR_MODE_ALLOC,
7403
                     name=self.op.instance_name,
7404
                     disk_template=self.op.disk_template,
7405
                     tags=[],
7406
                     os=self.op.os_type,
7407
                     vcpus=self.be_full[constants.BE_VCPUS],
7408
                     mem_size=self.be_full[constants.BE_MEMORY],
7409
                     disks=self.disks,
7410
                     nics=nics,
7411
                     hypervisor=self.op.hypervisor,
7412
                     )
7413

    
7414
    ial.Run(self.op.iallocator)
7415

    
7416
    if not ial.success:
7417
      raise errors.OpPrereqError("Can't compute nodes using"
7418
                                 " iallocator '%s': %s" %
7419
                                 (self.op.iallocator, ial.info),
7420
                                 errors.ECODE_NORES)
7421
    if len(ial.result) != ial.required_nodes:
7422
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
7423
                                 " of nodes (%s), required %s" %
7424
                                 (self.op.iallocator, len(ial.result),
7425
                                  ial.required_nodes), errors.ECODE_FAULT)
7426
    self.op.pnode = ial.result[0]
7427
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
7428
                 self.op.instance_name, self.op.iallocator,
7429
                 utils.CommaJoin(ial.result))
7430
    if ial.required_nodes == 2:
7431
      self.op.snode = ial.result[1]
7432

    
7433
  def BuildHooksEnv(self):
7434
    """Build hooks env.
7435

7436
    This runs on master, primary and secondary nodes of the instance.
7437

7438
    """
7439
    env = {
7440
      "ADD_MODE": self.op.mode,
7441
      }
7442
    if self.op.mode == constants.INSTANCE_IMPORT:
7443
      env["SRC_NODE"] = self.op.src_node
7444
      env["SRC_PATH"] = self.op.src_path
7445
      env["SRC_IMAGES"] = self.src_images
7446

    
7447
    env.update(_BuildInstanceHookEnv(
7448
      name=self.op.instance_name,
7449
      primary_node=self.op.pnode,
7450
      secondary_nodes=self.secondaries,
7451
      status=self.op.start,
7452
      os_type=self.op.os_type,
7453
      memory=self.be_full[constants.BE_MEMORY],
7454
      vcpus=self.be_full[constants.BE_VCPUS],
7455
      nics=_NICListToTuple(self, self.nics),
7456
      disk_template=self.op.disk_template,
7457
      disks=[(d["size"], d["mode"]) for d in self.disks],
7458
      bep=self.be_full,
7459
      hvp=self.hv_full,
7460
      hypervisor_name=self.op.hypervisor,
7461
    ))
7462

    
7463
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
7464
          self.secondaries)
7465
    return env, nl, nl
7466

    
7467
  def _ReadExportInfo(self):
7468
    """Reads the export information from disk.
7469

7470
    It will override the opcode source node and path with the actual
7471
    information, if these two were not specified before.
7472

7473
    @return: the export information
7474

7475
    """
7476
    assert self.op.mode == constants.INSTANCE_IMPORT
7477

    
7478
    src_node = self.op.src_node
7479
    src_path = self.op.src_path
7480

    
7481
    if src_node is None:
7482
      locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
7483
      exp_list = self.rpc.call_export_list(locked_nodes)
7484
      found = False
7485
      for node in exp_list:
7486
        if exp_list[node].fail_msg:
7487
          continue
7488
        if src_path in exp_list[node].payload:
7489
          found = True
7490
          self.op.src_node = src_node = node
7491
          self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
7492
                                                       src_path)
7493
          break
7494
      if not found:
7495
        raise errors.OpPrereqError("No export found for relative path %s" %
7496
                                    src_path, errors.ECODE_INVAL)
7497

    
7498
    _CheckNodeOnline(self, src_node)
7499
    result = self.rpc.call_export_info(src_node, src_path)
7500
    result.Raise("No export or invalid export found in dir %s" % src_path)
7501

    
7502
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
7503
    if not export_info.has_section(constants.INISECT_EXP):
7504
      raise errors.ProgrammerError("Corrupted export config",
7505
                                   errors.ECODE_ENVIRON)
7506

    
7507
    ei_version = export_info.get(constants.INISECT_EXP, "version")
7508
    if (int(ei_version) != constants.EXPORT_VERSION):
7509
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
7510
                                 (ei_version, constants.EXPORT_VERSION),
7511
                                 errors.ECODE_ENVIRON)
7512
    return export_info
7513

    
7514
  def _ReadExportParams(self, einfo):
7515
    """Use export parameters as defaults.
7516

7517
    In case the opcode doesn't specify (as in override) some instance
7518
    parameters, then try to use them from the export information, if
7519
    that declares them.
7520

7521
    """
7522
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
7523

    
7524
    if self.op.disk_template is None:
7525
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
7526
        self.op.disk_template = einfo.get(constants.INISECT_INS,
7527
                                          "disk_template")
7528
      else:
7529
        raise errors.OpPrereqError("No disk template specified and the export"
7530
                                   " is missing the disk_template information",
7531
                                   errors.ECODE_INVAL)
7532

    
7533
    if not self.op.disks:
7534
      if einfo.has_option(constants.INISECT_INS, "disk_count"):
7535
        disks = []
7536
        # TODO: import the disk iv_name too
7537
        for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
7538
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
7539
          disks.append({"size": disk_sz})
7540
        self.op.disks = disks
7541
      else:
7542
        raise errors.OpPrereqError("No disk info specified and the export"
7543
                                   " is missing the disk information",
7544
                                   errors.ECODE_INVAL)
7545

    
7546
    if (not self.op.nics and
7547
        einfo.has_option(constants.INISECT_INS, "nic_count")):
7548
      nics = []
7549
      for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
7550
        ndict = {}
7551
        for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
7552
          v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
7553
          ndict[name] = v
7554
        nics.append(ndict)
7555
      self.op.nics = nics
7556

    
7557
    if (self.op.hypervisor is None and
7558
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
7559
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
7560
    if einfo.has_section(constants.INISECT_HYP):
7561
      # use the export parameters but do not override the ones
7562
      # specified by the user
7563
      for name, value in einfo.items(constants.INISECT_HYP):
7564
        if name not in self.op.hvparams:
7565
          self.op.hvparams[name] = value
7566

    
7567
    if einfo.has_section(constants.INISECT_BEP):
7568
      # use the parameters, without overriding
7569
      for name, value in einfo.items(constants.INISECT_BEP):
7570
        if name not in self.op.beparams:
7571
          self.op.beparams[name] = value
7572
    else:
7573
      # try to read the parameters old style, from the main section
7574
      for name in constants.BES_PARAMETERS:
7575
        if (name not in self.op.beparams and
7576
            einfo.has_option(constants.INISECT_INS, name)):
7577
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
7578

    
7579
    if einfo.has_section(constants.INISECT_OSP):
7580
      # use the parameters, without overriding
7581
      for name, value in einfo.items(constants.INISECT_OSP):
7582
        if name not in self.op.osparams:
7583
          self.op.osparams[name] = value
7584

    
7585
  def _RevertToDefaults(self, cluster):
7586
    """Revert the instance parameters to the default values.
7587

7588
    """
7589
    # hvparams
7590
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
7591
    for name in self.op.hvparams.keys():
7592
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
7593
        del self.op.hvparams[name]
7594
    # beparams
7595
    be_defs = cluster.SimpleFillBE({})
7596
    for name in self.op.beparams.keys():
7597
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
7598
        del self.op.beparams[name]
7599
    # nic params
7600
    nic_defs = cluster.SimpleFillNIC({})
7601
    for nic in self.op.nics:
7602
      for name in constants.NICS_PARAMETERS:
7603
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
7604
          del nic[name]
7605
    # osparams
7606
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
7607
    for name in self.op.osparams.keys():
7608
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
7609
        del self.op.osparams[name]
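
  # Illustrative effect (hypothetical values): if the opcode passed a
  # beparam equal to the cluster-wide default, e.g. memory=128 while
  # cluster.SimpleFillBE({}) also yields 128, the key is deleted above so
  # the instance keeps tracking the cluster default rather than pinning it.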
7610

    
7611
  def CheckPrereq(self):
7612
    """Check prerequisites.
7613

7614
    """
7615
    if self.op.mode == constants.INSTANCE_IMPORT:
7616
      export_info = self._ReadExportInfo()
7617
      self._ReadExportParams(export_info)
7618

    
7619
    if (not self.cfg.GetVGName() and
7620
        self.op.disk_template not in constants.DTS_NOT_LVM):
7621
      raise errors.OpPrereqError("Cluster does not support lvm-based"
7622
                                 " instances", errors.ECODE_STATE)
7623

    
7624
    if self.op.hypervisor is None:
7625
      self.op.hypervisor = self.cfg.GetHypervisorType()
7626

    
7627
    cluster = self.cfg.GetClusterInfo()
7628
    enabled_hvs = cluster.enabled_hypervisors
7629
    if self.op.hypervisor not in enabled_hvs:
7630
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
7631
                                 " cluster (%s)" % (self.op.hypervisor,
7632
                                  ",".join(enabled_hvs)),
7633
                                 errors.ECODE_STATE)
7634

    
7635
    # check hypervisor parameter syntax (locally)
7636
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
7637
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
7638
                                      self.op.hvparams)
7639
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
7640
    hv_type.CheckParameterSyntax(filled_hvp)
7641
    self.hv_full = filled_hvp
7642
    # check that we don't specify global parameters on an instance
7643
    _CheckGlobalHvParams(self.op.hvparams)
7644

    
7645
    # fill and remember the beparams dict
7646
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
7647
    self.be_full = cluster.SimpleFillBE(self.op.beparams)
7648

    
7649
    # build os parameters
7650
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
7651

    
7652
    # now that hvp/bep are in final format, let's reset to defaults,
7653
    # if told to do so
7654
    if self.op.identify_defaults:
7655
      self._RevertToDefaults(cluster)
7656

    
7657
    # NIC buildup
7658
    self.nics = []
7659
    for idx, nic in enumerate(self.op.nics):
7660
      nic_mode_req = nic.get("mode", None)
7661
      nic_mode = nic_mode_req
7662
      if nic_mode is None:
7663
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
7664

    
7665
      # in routed mode, for the first nic, the default ip is 'auto'
7666
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
7667
        default_ip_mode = constants.VALUE_AUTO
7668
      else:
7669
        default_ip_mode = constants.VALUE_NONE
7670

    
7671
      # ip validity checks
7672
      ip = nic.get("ip", default_ip_mode)
7673
      if ip is None or ip.lower() == constants.VALUE_NONE:
7674
        nic_ip = None
7675
      elif ip.lower() == constants.VALUE_AUTO:
7676
        if not self.op.name_check:
7677
          raise errors.OpPrereqError("IP address set to auto but name checks"
7678
                                     " have been skipped",
7679
                                     errors.ECODE_INVAL)
7680
        nic_ip = self.hostname1.ip
7681
      else:
7682
        if not netutils.IPAddress.IsValid(ip):
7683
          raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
7684
                                     errors.ECODE_INVAL)
7685
        nic_ip = ip
7686

    
7687
      # TODO: check the ip address for uniqueness
7688
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
7689
        raise errors.OpPrereqError("Routed nic mode requires an ip address",
7690
                                   errors.ECODE_INVAL)
7691

    
7692
      # MAC address verification
7693
      mac = nic.get("mac", constants.VALUE_AUTO)
7694
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7695
        mac = utils.NormalizeAndValidateMac(mac)
7696

    
7697
        try:
7698
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
7699
        except errors.ReservationError:
7700
          raise errors.OpPrereqError("MAC address %s already in use"
7701
                                     " in cluster" % mac,
7702
                                     errors.ECODE_NOTUNIQUE)
7703

    
7704
      #  Build nic parameters
7705
      link = nic.get(constants.INIC_LINK, None)
7706
      nicparams = {}
7707
      if nic_mode_req:
7708
        nicparams[constants.NIC_MODE] = nic_mode_req
7709
      if link:
7710
        nicparams[constants.NIC_LINK] = link
7711

    
7712
      check_params = cluster.SimpleFillNIC(nicparams)
7713
      objects.NIC.CheckParameterSyntax(check_params)
7714
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
7715

    
7716
    # disk checks/pre-build
7717
    self.disks = []
7718
    for disk in self.op.disks:
7719
      mode = disk.get("mode", constants.DISK_RDWR)
7720
      if mode not in constants.DISK_ACCESS_SET:
7721
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
7722
                                   mode, errors.ECODE_INVAL)
7723
      size = disk.get("size", None)
7724
      if size is None:
7725
        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
7726
      try:
7727
        size = int(size)
7728
      except (TypeError, ValueError):
7729
        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
7730
                                   errors.ECODE_INVAL)
7731
      vg = disk.get("vg", self.cfg.GetVGName())
7732
      new_disk = {"size": size, "mode": mode, "vg": vg}
7733
      if "adopt" in disk:
7734
        new_disk["adopt"] = disk["adopt"]
7735
      self.disks.append(new_disk)
7736

    
7737
    if self.op.mode == constants.INSTANCE_IMPORT:
7738

    
7739
      # Check that the new instance doesn't have less disks than the export
7740
      instance_disks = len(self.disks)
7741
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
7742
      if instance_disks < export_disks:
7743
        raise errors.OpPrereqError("Not enough disks to import."
7744
                                   " (instance: %d, export: %d)" %
7745
                                   (instance_disks, export_disks),
7746
                                   errors.ECODE_INVAL)
7747

    
7748
      disk_images = []
7749
      for idx in range(export_disks):
7750
        option = 'disk%d_dump' % idx
7751
        if export_info.has_option(constants.INISECT_INS, option):
7752
          # FIXME: are the old os-es, disk sizes, etc. useful?
7753
          export_name = export_info.get(constants.INISECT_INS, option)
7754
          image = utils.PathJoin(self.op.src_path, export_name)
7755
          disk_images.append(image)
7756
        else:
7757
          disk_images.append(False)
7758

    
7759
      self.src_images = disk_images
7760

    
7761
      old_name = export_info.get(constants.INISECT_INS, 'name')
7762
      try:
7763
        exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
7764
      except (TypeError, ValueError), err:
7765
        raise errors.OpPrereqError("Invalid export file, nic_count is not"
7766
                                   " an integer: %s" % str(err),
7767
                                   errors.ECODE_STATE)
7768
      if self.op.instance_name == old_name:
7769
        for idx, nic in enumerate(self.nics):
7770
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
7771
            nic_mac_ini = 'nic%d_mac' % idx
7772
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
7773

    
7774
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
7775

    
7776
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
7777
    if self.op.ip_check:
7778
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
7779
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
7780
                                   (self.check_ip, self.op.instance_name),
7781
                                   errors.ECODE_NOTUNIQUE)
7782

    
7783
    #### mac address generation
7784
    # By generating here the mac address both the allocator and the hooks get
7785
    # the real final mac address rather than the 'auto' or 'generate' value.
7786
    # There is a race condition between the generation and the instance object
7787
    # creation, which means that we know the mac is valid now, but we're not
7788
    # sure it will be when we actually add the instance. If things go bad
7789
    # adding the instance will abort because of a duplicate mac, and the
7790
    # creation job will fail.
7791
    for nic in self.nics:
7792
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7793
        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
7794

    
7795
    #### allocator run
7796

    
7797
    if self.op.iallocator is not None:
7798
      self._RunAllocator()
7799

    
7800
    #### node related checks
7801

    
7802
    # check primary node
7803
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
7804
    assert self.pnode is not None, \
7805
      "Cannot retrieve locked node %s" % self.op.pnode
7806
    if pnode.offline:
7807
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
7808
                                 pnode.name, errors.ECODE_STATE)
7809
    if pnode.drained:
7810
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
7811
                                 pnode.name, errors.ECODE_STATE)
7812
    if not pnode.vm_capable:
7813
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
7814
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
7815

    
7816
    self.secondaries = []
7817

    
7818
    # mirror node verification
7819
    if self.op.disk_template in constants.DTS_INT_MIRROR:
7820
      if self.op.snode == pnode.name:
7821
        raise errors.OpPrereqError("The secondary node cannot be the"
7822
                                   " primary node.", errors.ECODE_INVAL)
7823
      _CheckNodeOnline(self, self.op.snode)
7824
      _CheckNodeNotDrained(self, self.op.snode)
7825
      _CheckNodeVmCapable(self, self.op.snode)
7826
      self.secondaries.append(self.op.snode)
7827

    
7828
    nodenames = [pnode.name] + self.secondaries
7829

    
7830
    if not self.adopt_disks:
7831
      # Check lv size requirements, if not adopting
7832
      req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
7833
      _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
7834

    
7835
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
7836
      all_lvs = set([i["vg"] + "/" + i["adopt"] for i in self.disks])
7837
      if len(all_lvs) != len(self.disks):
7838
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
7839
                                   errors.ECODE_INVAL)
7840
      for lv_name in all_lvs:
7841
        try:
7842
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
7843
          # to ReserveLV uses the same syntax
7844
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
7845
        except errors.ReservationError:
7846
          raise errors.OpPrereqError("LV named %s used by another instance" %
7847
                                     lv_name, errors.ECODE_NOTUNIQUE)
7848

    
7849
      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
7850
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
7851

    
7852
      node_lvs = self.rpc.call_lv_list([pnode.name],
7853
                                       vg_names.payload.keys())[pnode.name]
7854
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
7855
      node_lvs = node_lvs.payload
7856

    
7857
      delta = all_lvs.difference(node_lvs.keys())
7858
      if delta:
7859
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
7860
                                   utils.CommaJoin(delta),
7861
                                   errors.ECODE_INVAL)
7862
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
7863
      if online_lvs:
7864
        raise errors.OpPrereqError("Online logical volumes found, cannot"
7865
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
7866
                                   errors.ECODE_STATE)
7867
      # update the size of disk based on what is found
7868
      for dsk in self.disks:
7869
        dsk["size"] = int(float(node_lvs[dsk["vg"] + "/" + dsk["adopt"]][0]))
7870

    
7871
    elif self.op.disk_template == constants.DT_BLOCK:
7872
      # Normalize and de-duplicate device paths
7873
      all_disks = set([os.path.abspath(i["adopt"]) for i in self.disks])
7874
      if len(all_disks) != len(self.disks):
7875
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
7876
                                   errors.ECODE_INVAL)
7877
      baddisks = [d for d in all_disks
7878
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
7879
      if baddisks:
7880
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
7881
                                   " cannot be adopted" %
7882
                                   (", ".join(baddisks),
7883
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
7884
                                   errors.ECODE_INVAL)
7885

    
7886
      node_disks = self.rpc.call_bdev_sizes([pnode.name],
7887
                                            list(all_disks))[pnode.name]
7888
      node_disks.Raise("Cannot get block device information from node %s" %
7889
                       pnode.name)
7890
      node_disks = node_disks.payload
7891
      delta = all_disks.difference(node_disks.keys())
7892
      if delta:
7893
        raise errors.OpPrereqError("Missing block device(s): %s" %
7894
                                   utils.CommaJoin(delta),
7895
                                   errors.ECODE_INVAL)
7896
      for dsk in self.disks:
7897
        dsk["size"] = int(float(node_disks[dsk["adopt"]]))
7898

    
7899
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
7900

    
7901
    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
7902
    # check OS parameters (remotely)
7903
    _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
7904

    
7905
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
7906

    
7907
    # memory check on primary node
7908
    if self.op.start:
7909
      _CheckNodeFreeMemory(self, self.pnode.name,
7910
                           "creating instance %s" % self.op.instance_name,
7911
                           self.be_full[constants.BE_MEMORY],
7912
                           self.op.hypervisor)
7913

    
7914
    self.dry_run_result = list(nodenames)
7915

    
7916
  def Exec(self, feedback_fn):
7917
    """Create and add the instance to the cluster.
7918

7919
    """
7920
    instance = self.op.instance_name
7921
    pnode_name = self.pnode.name
7922

    
7923
    ht_kind = self.op.hypervisor
7924
    if ht_kind in constants.HTS_REQ_PORT:
7925
      network_port = self.cfg.AllocatePort()
7926
    else:
7927
      network_port = None
7928

    
7929
    if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
7930
      # this is needed because os.path.join does not accept None arguments
7931
      if self.op.file_storage_dir is None:
7932
        string_file_storage_dir = ""
7933
      else:
7934
        string_file_storage_dir = self.op.file_storage_dir
7935

    
7936
      # build the full file storage dir path
7937
      if self.op.disk_template == constants.DT_SHARED_FILE:
7938
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
7939
      else:
7940
        get_fsd_fn = self.cfg.GetFileStorageDir
7941

    
7942
      file_storage_dir = utils.PathJoin(get_fsd_fn(),
7943
                                        string_file_storage_dir, instance)
7944
    else:
7945
      file_storage_dir = ""
7946

    
7947
    disks = _GenerateDiskTemplate(self,
7948
                                  self.op.disk_template,
7949
                                  instance, pnode_name,
7950
                                  self.secondaries,
7951
                                  self.disks,
7952
                                  file_storage_dir,
7953
                                  self.op.file_driver,
7954
                                  0,
7955
                                  feedback_fn)
7956

    
7957
    iobj = objects.Instance(name=instance, os=self.op.os_type,
7958
                            primary_node=pnode_name,
7959
                            nics=self.nics, disks=disks,
7960
                            disk_template=self.op.disk_template,
7961
                            admin_up=False,
7962
                            network_port=network_port,
7963
                            beparams=self.op.beparams,
7964
                            hvparams=self.op.hvparams,
7965
                            hypervisor=self.op.hypervisor,
7966
                            osparams=self.op.osparams,
7967
                            )
7968

    
7969
    if self.adopt_disks:
7970
      if self.op.disk_template == constants.DT_PLAIN:
7971
        # rename LVs to the newly-generated names; we need to construct
7972
        # 'fake' LV disks with the old data, plus the new unique_id
7973
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
7974
        rename_to = []
7975
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
7976
          rename_to.append(t_dsk.logical_id)
7977
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
7978
          self.cfg.SetDiskID(t_dsk, pnode_name)
7979
        result = self.rpc.call_blockdev_rename(pnode_name,
7980
                                               zip(tmp_disks, rename_to))
7981
        result.Raise("Failed to rename adoped LVs")
7982
    else:
7983
      feedback_fn("* creating instance disks...")
7984
      try:
7985
        _CreateDisks(self, iobj)
7986
      except errors.OpExecError:
7987
        self.LogWarning("Device creation failed, reverting...")
7988
        try:
7989
          _RemoveDisks(self, iobj)
7990
        finally:
7991
          self.cfg.ReleaseDRBDMinors(instance)
7992
          raise
7993

    
7994
      if self.cfg.GetClusterInfo().prealloc_wipe_disks:
7995
        feedback_fn("* wiping instance disks...")
7996
        try:
7997
          _WipeDisks(self, iobj)
7998
        except errors.OpExecError:
7999
          self.LogWarning("Device wiping failed, reverting...")
8000
          try:
8001
            _RemoveDisks(self, iobj)
8002
          finally:
8003
            self.cfg.ReleaseDRBDMinors(instance)
8004
            raise
8005

    
8006
    feedback_fn("adding instance %s to cluster config" % instance)
8007

    
8008
    self.cfg.AddInstance(iobj, self.proc.GetECId())
8009

    
8010
    # Declare that we don't want to remove the instance lock anymore, as we've
8011
    # added the instance to the config
8012
    del self.remove_locks[locking.LEVEL_INSTANCE]
8013
    # Unlock all the nodes
8014
    if self.op.mode == constants.INSTANCE_IMPORT:
8015
      nodes_keep = [self.op.src_node]
8016
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
8017
                       if node != self.op.src_node]
8018
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
8019
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
8020
    else:
8021
      self.context.glm.release(locking.LEVEL_NODE)
8022
      del self.acquired_locks[locking.LEVEL_NODE]
8023

    
8024
    if self.op.wait_for_sync:
8025
      disk_abort = not _WaitForSync(self, iobj)
8026
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
8027
      # make sure the disks are not degraded (still sync-ing is ok)
8028
      time.sleep(15)
8029
      feedback_fn("* checking mirrors status")
8030
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
8031
    else:
8032
      disk_abort = False
8033

    
8034
    if disk_abort:
8035
      _RemoveDisks(self, iobj)
8036
      self.cfg.RemoveInstance(iobj.name)
8037
      # Make sure the instance lock gets removed
8038
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
8039
      raise errors.OpExecError("There are some degraded disks for"
8040
                               " this instance")
8041

    
8042
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
8043
      if self.op.mode == constants.INSTANCE_CREATE:
8044
        if not self.op.no_install:
8045
          feedback_fn("* running the instance OS create scripts...")
8046
          # FIXME: pass debug option from opcode to backend
8047
          result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
8048
                                                 self.op.debug_level)
8049
          result.Raise("Could not add os for instance %s"
8050
                       " on node %s" % (instance, pnode_name))
8051

    
8052
      elif self.op.mode == constants.INSTANCE_IMPORT:
8053
        feedback_fn("* running the instance OS import scripts...")
8054

    
8055
        transfers = []
8056

    
8057
        for idx, image in enumerate(self.src_images):
8058
          if not image:
8059
            continue
8060

    
8061
          # FIXME: pass debug option from opcode to backend
8062
          dt = masterd.instance.DiskTransfer("disk/%s" % idx,
8063
                                             constants.IEIO_FILE, (image, ),
8064
                                             constants.IEIO_SCRIPT,
8065
                                             (iobj.disks[idx], idx),
8066
                                             None)
8067
          transfers.append(dt)
8068

    
8069
        import_result = \
8070
          masterd.instance.TransferInstanceData(self, feedback_fn,
8071
                                                self.op.src_node, pnode_name,
8072
                                                self.pnode.secondary_ip,
8073
                                                iobj, transfers)
8074
        if not compat.all(import_result):
8075
          self.LogWarning("Some disks for instance %s on node %s were not"
8076
                          " imported successfully" % (instance, pnode_name))
8077

    
8078
      elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
8079
        feedback_fn("* preparing remote import...")
8080
        # The source cluster will stop the instance before attempting to make a
8081
        # connection. In some cases stopping an instance can take a long time,
8082
        # hence the shutdown timeout is added to the connection timeout.
8083
        connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
8084
                           self.op.source_shutdown_timeout)
8085
        timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
8086

    
8087
        assert iobj.primary_node == self.pnode.name
8088
        disk_results = \
8089
          masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
8090
                                        self.source_x509_ca,
8091
                                        self._cds, timeouts)
        if not compat.all(disk_results):
          # TODO: Should the instance still be started, even if some disks
          # failed to import (valid for local imports, too)?
          self.LogWarning("Some disks for instance %s on node %s were not"
                          " imported successfully" % (instance, pnode_name))

        # Run rename script on newly imported instance
        assert iobj.name == instance
        feedback_fn("Running rename script for %s" % instance)
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
                                                   self.source_instance_name,
                                                   self.op.debug_level)
        if result.fail_msg:
          self.LogWarning("Failed to run rename script for %s on node"
                          " %s: %s" % (instance, pnode_name, result.fail_msg))

      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      iobj.admin_up = True
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    instance = self.instance
    node = instance.primary_node

    node_insts = self.rpc.call_instance_list([node],
                                             [instance.hypervisor])[node]
    node_insts.Raise("Can't get node information from %s" % node)

    if instance.name not in node_insts.payload:
      if instance.admin_up:
        state = constants.INSTST_ERRORDOWN
      else:
        state = constants.INSTST_ADMINDOWN
      raise errors.OpExecError("Instance %s is not running (state %s)" %
                               (instance.name, state))

    logging.debug("Connecting to console of %s on %s", instance.name, node)

    return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)


def _GetInstanceConsole(cluster, instance):
  """Returns console information for an instance.

  @type cluster: L{objects.Cluster}
  @type instance: L{objects.Instance}
  @rtype: dict

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  # beparams and hvparams are passed separately, to avoid editing the
  # instance and then saving the defaults in the instance itself.
  hvparams = cluster.FillHV(instance)
  beparams = cluster.FillBE(instance)
  console = hyper.GetInstanceConsole(instance, hvparams, beparams)

  assert console.instance == instance.name
  assert console.Validate()

  return console.ToDict()
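
# Illustrative sketch only (not part of the module): the dictionary returned
# by _GetInstanceConsole() is the serialized form of the console object built
# by the hypervisor. Assuming an SSH-based console, a caller might see
# something roughly like:
#
#   console = _GetInstanceConsole(cluster, instance)
#   # console == {"instance": "inst1.example.com", "kind": "ssh",
#   #             "host": "node1.example.com", "user": "root", ...}
#
# The exact keys depend on the console class the hypervisor returns; the
# values shown here are hypothetical.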


class LUInstanceReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
                                  self.op.iallocator)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if self.op.iallocator is not None:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

    elif self.op.remote_node is not None:
      remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
      self.op.remote_node = remote_node

      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
                                   self.op.iallocator, self.op.remote_node,
                                   self.op.disks, False, self.op.early_release)

    self.tasklets = [self.replacer]

  def DeclareLocks(self, level):
    # If we're not already locking all nodes in the set we have to declare the
    # instance's primary/secondary nodes.
    if (level == locking.LEVEL_NODE and
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    instance = self.replacer.instance
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self, instance))
    nl = [
      self.cfg.GetMasterNode(),
      instance.primary_node,
      ]
    if self.op.remote_node is not None:
      nl.append(self.op.remote_node)
    return env, nl, nl
8259

    
8260

    
8261
class TLReplaceDisks(Tasklet):
8262
  """Replaces disks for an instance.
8263

8264
  Note: Locking is not within the scope of this class.
8265

8266
  """
8267
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
8268
               disks, delay_iallocator, early_release):
8269
    """Initializes this class.
8270

8271
    """
8272
    Tasklet.__init__(self, lu)
8273

    
8274
    # Parameters
8275
    self.instance_name = instance_name
8276
    self.mode = mode
8277
    self.iallocator_name = iallocator_name
8278
    self.remote_node = remote_node
8279
    self.disks = disks
8280
    self.delay_iallocator = delay_iallocator
8281
    self.early_release = early_release
8282

    
8283
    # Runtime data
8284
    self.instance = None
8285
    self.new_node = None
8286
    self.target_node = None
8287
    self.other_node = None
8288
    self.remote_node_info = None
8289
    self.node_secondary_ip = None
8290

    
8291
  @staticmethod
8292
  def CheckArguments(mode, remote_node, iallocator):
8293
    """Helper function for users of this class.
8294

8295
    """
8296
    # check for valid parameter combination
8297
    if mode == constants.REPLACE_DISK_CHG:
8298
      if remote_node is None and iallocator is None:
8299
        raise errors.OpPrereqError("When changing the secondary either an"
8300
                                   " iallocator script must be used or the"
8301
                                   " new node given", errors.ECODE_INVAL)
8302

    
8303
      if remote_node is not None and iallocator is not None:
8304
        raise errors.OpPrereqError("Give either the iallocator or the new"
8305
                                   " secondary, not both", errors.ECODE_INVAL)
8306

    
8307
    elif remote_node is not None or iallocator is not None:
8308
      # Not replacing the secondary
8309
      raise errors.OpPrereqError("The iallocator and new node options can"
8310
                                 " only be used when changing the"
8311
                                 " secondary node", errors.ECODE_INVAL)
8312

    
8313
  @staticmethod
8314
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
8315
    """Compute a new secondary node using an IAllocator.
8316

8317
    """
8318
    ial = IAllocator(lu.cfg, lu.rpc,
8319
                     mode=constants.IALLOCATOR_MODE_RELOC,
8320
                     name=instance_name,
8321
                     relocate_from=relocate_from)
8322

    
8323
    ial.Run(iallocator_name)
8324

    
8325
    if not ial.success:
8326
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
8327
                                 " %s" % (iallocator_name, ial.info),
8328
                                 errors.ECODE_NORES)
8329

    
8330
    if len(ial.result) != ial.required_nodes:
8331
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
8332
                                 " of nodes (%s), required %s" %
8333
                                 (iallocator_name,
8334
                                  len(ial.result), ial.required_nodes),
8335
                                 errors.ECODE_FAULT)
8336

    
8337
    remote_node_name = ial.result[0]
8338

    
8339
    lu.LogInfo("Selected new secondary for instance '%s': %s",
8340
               instance_name, remote_node_name)
8341

    
8342
    return remote_node_name
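
  # Illustrative sketch only: a hypothetical invocation of _RunAllocator
  # while replacing the secondary of an instance (all names are made up):
  #
  #   new_secondary = TLReplaceDisks._RunAllocator(
  #       lu, "hail", "inst1.example.com",
  #       relocate_from=["node2.example.com"])
  #
  # On success this returns the node name chosen by the iallocator; on
  # failure, or if the result has the wrong length, it raises
  # errors.OpPrereqError.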
8343

    
8344
  def _FindFaultyDisks(self, node_name):
8345
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
8346
                                    node_name, True)
8347

    
8348
  def CheckPrereq(self):
8349
    """Check prerequisites.
8350

8351
    This checks that the instance is in the cluster.
8352

8353
    """
8354
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
8355
    assert instance is not None, \
8356
      "Cannot retrieve locked instance %s" % self.instance_name
8357

    
8358
    if instance.disk_template != constants.DT_DRBD8:
8359
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
8360
                                 " instances", errors.ECODE_INVAL)
8361

    
8362
    if len(instance.secondary_nodes) != 1:
8363
      raise errors.OpPrereqError("The instance has a strange layout,"
8364
                                 " expected one secondary but found %d" %
8365
                                 len(instance.secondary_nodes),
8366
                                 errors.ECODE_FAULT)
8367

    
8368
    if not self.delay_iallocator:
8369
      self._CheckPrereq2()
8370

    
8371
  def _CheckPrereq2(self):
8372
    """Check prerequisites, second part.
8373

8374
    This function should always be part of CheckPrereq. It was separated and is
8375
    now called from Exec because during node evacuation iallocator was only
8376
    called with an unmodified cluster model, not taking planned changes into
8377
    account.
8378

8379
    """
8380
    instance = self.instance
8381
    secondary_node = instance.secondary_nodes[0]
8382

    
8383
    if self.iallocator_name is None:
8384
      remote_node = self.remote_node
8385
    else:
8386
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
8387
                                       instance.name, instance.secondary_nodes)
8388

    
8389
    if remote_node is not None:
8390
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
8391
      assert self.remote_node_info is not None, \
8392
        "Cannot retrieve locked node %s" % remote_node
8393
    else:
8394
      self.remote_node_info = None
8395

    
8396
    if remote_node == self.instance.primary_node:
8397
      raise errors.OpPrereqError("The specified node is the primary node of"
8398
                                 " the instance.", errors.ECODE_INVAL)
8399

    
8400
    if remote_node == secondary_node:
8401
      raise errors.OpPrereqError("The specified node is already the"
8402
                                 " secondary node of the instance.",
8403
                                 errors.ECODE_INVAL)
8404

    
8405
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
8406
                                    constants.REPLACE_DISK_CHG):
8407
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
8408
                                 errors.ECODE_INVAL)
8409

    
8410
    if self.mode == constants.REPLACE_DISK_AUTO:
8411
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
8412
      faulty_secondary = self._FindFaultyDisks(secondary_node)
8413

    
8414
      if faulty_primary and faulty_secondary:
8415
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
8416
                                   " one node and can not be repaired"
8417
                                   " automatically" % self.instance_name,
8418
                                   errors.ECODE_STATE)
8419

    
8420
      if faulty_primary:
8421
        self.disks = faulty_primary
8422
        self.target_node = instance.primary_node
8423
        self.other_node = secondary_node
8424
        check_nodes = [self.target_node, self.other_node]
8425
      elif faulty_secondary:
8426
        self.disks = faulty_secondary
8427
        self.target_node = secondary_node
8428
        self.other_node = instance.primary_node
8429
        check_nodes = [self.target_node, self.other_node]
8430
      else:
8431
        self.disks = []
8432
        check_nodes = []
8433

    
8434
    else:
8435
      # Non-automatic modes
8436
      if self.mode == constants.REPLACE_DISK_PRI:
8437
        self.target_node = instance.primary_node
8438
        self.other_node = secondary_node
8439
        check_nodes = [self.target_node, self.other_node]
8440

    
8441
      elif self.mode == constants.REPLACE_DISK_SEC:
8442
        self.target_node = secondary_node
8443
        self.other_node = instance.primary_node
8444
        check_nodes = [self.target_node, self.other_node]
8445

    
8446
      elif self.mode == constants.REPLACE_DISK_CHG:
8447
        self.new_node = remote_node
8448
        self.other_node = instance.primary_node
8449
        self.target_node = secondary_node
8450
        check_nodes = [self.new_node, self.other_node]
8451

    
8452
        _CheckNodeNotDrained(self.lu, remote_node)
8453
        _CheckNodeVmCapable(self.lu, remote_node)
8454

    
8455
        old_node_info = self.cfg.GetNodeInfo(secondary_node)
8456
        assert old_node_info is not None
8457
        if old_node_info.offline and not self.early_release:
8458
          # doesn't make sense to delay the release
8459
          self.early_release = True
8460
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
8461
                          " early-release mode", secondary_node)
8462

    
8463
      else:
8464
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
8465
                                     self.mode)
8466

    
8467
      # If not specified all disks should be replaced
8468
      if not self.disks:
8469
        self.disks = range(len(self.instance.disks))
8470

    
8471
    for node in check_nodes:
8472
      _CheckNodeOnline(self.lu, node)
8473

    
8474
    # Check whether disks are valid
8475
    for disk_idx in self.disks:
8476
      instance.FindDisk(disk_idx)
8477

    
8478
    # Get secondary node IP addresses
8479
    node_2nd_ip = {}
8480

    
8481
    for node_name in [self.target_node, self.other_node, self.new_node]:
8482
      if node_name is not None:
8483
        node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
8484

    
8485
    self.node_secondary_ip = node_2nd_ip
8486

    
8487
  def Exec(self, feedback_fn):
8488
    """Execute disk replacement.
8489

8490
    This dispatches the disk replacement to the appropriate handler.
8491

8492
    """
8493
    if self.delay_iallocator:
8494
      self._CheckPrereq2()
8495

    
8496
    if not self.disks:
8497
      feedback_fn("No disks need replacement")
8498
      return
8499

    
8500
    feedback_fn("Replacing disk(s) %s for %s" %
8501
                (utils.CommaJoin(self.disks), self.instance.name))
8502

    
8503
    activate_disks = (not self.instance.admin_up)
8504

    
8505
    # Activate the instance disks if we're replacing them on a down instance
8506
    if activate_disks:
8507
      _StartInstanceDisks(self.lu, self.instance, True)
8508

    
8509
    try:
8510
      # Should we replace the secondary node?
8511
      if self.new_node is not None:
8512
        fn = self._ExecDrbd8Secondary
8513
      else:
8514
        fn = self._ExecDrbd8DiskOnly
8515

    
8516
      return fn(feedback_fn)
8517

    
8518
    finally:
8519
      # Deactivate the instance disks if we're replacing them on a
8520
      # down instance
8521
      if activate_disks:
8522
        _SafeShutdownInstanceDisks(self.lu, self.instance)
8523

    
8524
  def _CheckVolumeGroup(self, nodes):
8525
    self.lu.LogInfo("Checking volume groups")
8526

    
8527
    vgname = self.cfg.GetVGName()
8528

    
8529
    # Make sure volume group exists on all involved nodes
8530
    results = self.rpc.call_vg_list(nodes)
8531
    if not results:
8532
      raise errors.OpExecError("Can't list volume groups on the nodes")
8533

    
8534
    for node in nodes:
8535
      res = results[node]
8536
      res.Raise("Error checking node %s" % node)
8537
      if vgname not in res.payload:
8538
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
8539
                                 (vgname, node))
8540

    
8541
  def _CheckDisksExistence(self, nodes):
8542
    # Check disk existence
8543
    for idx, dev in enumerate(self.instance.disks):
8544
      if idx not in self.disks:
8545
        continue
8546

    
8547
      for node in nodes:
8548
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
8549
        self.cfg.SetDiskID(dev, node)
8550

    
8551
        result = self.rpc.call_blockdev_find(node, dev)
8552

    
8553
        msg = result.fail_msg
8554
        if msg or not result.payload:
8555
          if not msg:
8556
            msg = "disk not found"
8557
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
8558
                                   (idx, node, msg))
8559

    
8560
  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
8561
    for idx, dev in enumerate(self.instance.disks):
8562
      if idx not in self.disks:
8563
        continue
8564

    
8565
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
8566
                      (idx, node_name))
8567

    
8568
      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
8569
                                   ldisk=ldisk):
8570
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
8571
                                 " replace disks for instance %s" %
8572
                                 (node_name, self.instance.name))
8573

    
8574
  def _CreateNewStorage(self, node_name):
8575
    vgname = self.cfg.GetVGName()
8576
    iv_names = {}
8577

    
8578
    for idx, dev in enumerate(self.instance.disks):
8579
      if idx not in self.disks:
8580
        continue
8581

    
8582
      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
8583

    
8584
      self.cfg.SetDiskID(dev, node_name)
8585

    
8586
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
8587
      names = _GenerateUniqueNames(self.lu, lv_names)
8588

    
8589
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
8590
                             logical_id=(vgname, names[0]))
8591
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
8592
                             logical_id=(vgname, names[1]))
8593

    
8594
      new_lvs = [lv_data, lv_meta]
8595
      old_lvs = dev.children
8596
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
8597

    
8598
      # we pass force_create=True to force the LVM creation
8599
      for new_lv in new_lvs:
8600
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
8601
                        _GetInstanceInfoText(self.instance), False)
8602

    
8603
    return iv_names
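
  # Illustrative sketch only: the mapping returned by _CreateNewStorage keys
  # each disk's iv_name to a (device, old LVs, new LVs) tuple, e.g.
  # (hypothetical values):
  #
  #   iv_names == {
  #     "disk/0": (<DRBD8 disk>, [old_data_lv, old_meta_lv],
  #                [new_data_lv, new_meta_lv]),
  #     }
  #
  # _ExecDrbd8DiskOnly later iterates over this mapping to detach, rename and
  # re-attach the logical volumes.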
8604

    
8605
  def _CheckDevices(self, node_name, iv_names):
8606
    for name, (dev, _, _) in iv_names.iteritems():
8607
      self.cfg.SetDiskID(dev, node_name)
8608

    
8609
      result = self.rpc.call_blockdev_find(node_name, dev)
8610

    
8611
      msg = result.fail_msg
8612
      if msg or not result.payload:
8613
        if not msg:
8614
          msg = "disk not found"
8615
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
8616
                                 (name, msg))
8617

    
8618
      if result.payload.is_degraded:
8619
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
8620

    
8621
  def _RemoveOldStorage(self, node_name, iv_names):
8622
    for name, (_, old_lvs, _) in iv_names.iteritems():
8623
      self.lu.LogInfo("Remove logical volumes for %s" % name)
8624

    
8625
      for lv in old_lvs:
8626
        self.cfg.SetDiskID(lv, node_name)
8627

    
8628
        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
8629
        if msg:
8630
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
8631
                             hint="remove unused LVs manually")
8632

    
8633
  def _ReleaseNodeLock(self, node_name):
8634
    """Releases the lock for a given node."""
8635
    self.lu.context.glm.release(locking.LEVEL_NODE, node_name)
8636

    
8637
  def _ExecDrbd8DiskOnly(self, feedback_fn):
8638
    """Replace a disk on the primary or secondary for DRBD 8.
8639

8640
    The algorithm for replace is quite complicated:
8641

8642
      1. for each disk to be replaced:
8643

8644
        1. create new LVs on the target node with unique names
8645
        1. detach old LVs from the drbd device
8646
        1. rename old LVs to name_replaced.<time_t>
8647
        1. rename new LVs to old LVs
8648
        1. attach the new LVs (with the old names now) to the drbd device
8649

8650
      1. wait for sync across all devices
8651

8652
      1. for each modified disk:
8653

8654
        1. remove old LVs (which have the name name_replaced.<time_t>)
8655

8656
    Failures are not very well handled.
8657

8658
    """
8659
    steps_total = 6
8660

    
8661
    # Step: check device activation
8662
    self.lu.LogStep(1, steps_total, "Check device existence")
8663
    self._CheckDisksExistence([self.other_node, self.target_node])
8664
    self._CheckVolumeGroup([self.target_node, self.other_node])
8665

    
8666
    # Step: check other node consistency
8667
    self.lu.LogStep(2, steps_total, "Check peer consistency")
8668
    self._CheckDisksConsistency(self.other_node,
8669
                                self.other_node == self.instance.primary_node,
8670
                                False)
8671

    
8672
    # Step: create new storage
8673
    self.lu.LogStep(3, steps_total, "Allocate new storage")
8674
    iv_names = self._CreateNewStorage(self.target_node)
8675

    
8676
    # Step: for each lv, detach+rename*2+attach
8677
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
8678
    for dev, old_lvs, new_lvs in iv_names.itervalues():
8679
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
8680

    
8681
      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
8682
                                                     old_lvs)
8683
      result.Raise("Can't detach drbd from local storage on node"
8684
                   " %s for device %s" % (self.target_node, dev.iv_name))
8685
      #dev.children = []
8686
      #cfg.Update(instance)
8687

    
8688
      # ok, we created the new LVs, so now we know we have the needed
8689
      # storage; as such, we proceed on the target node to rename
8690
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
8691
      # using the assumption that logical_id == physical_id (which in
8692
      # turn is the unique_id on that node)
8693

    
8694
      # FIXME(iustin): use a better name for the replaced LVs
8695
      temp_suffix = int(time.time())
8696
      ren_fn = lambda d, suff: (d.physical_id[0],
8697
                                d.physical_id[1] + "_replaced-%s" % suff)
8698

    
8699
      # Build the rename list based on what LVs exist on the node
8700
      rename_old_to_new = []
8701
      for to_ren in old_lvs:
8702
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
8703
        if not result.fail_msg and result.payload:
8704
          # device exists
8705
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
8706

    
8707
      self.lu.LogInfo("Renaming the old LVs on the target node")
8708
      result = self.rpc.call_blockdev_rename(self.target_node,
8709
                                             rename_old_to_new)
8710
      result.Raise("Can't rename old LVs on node %s" % self.target_node)
8711

    
8712
      # Now we rename the new LVs to the old LVs
8713
      self.lu.LogInfo("Renaming the new LVs on the target node")
8714
      rename_new_to_old = [(new, old.physical_id)
8715
                           for old, new in zip(old_lvs, new_lvs)]
8716
      result = self.rpc.call_blockdev_rename(self.target_node,
8717
                                             rename_new_to_old)
8718
      result.Raise("Can't rename new LVs on node %s" % self.target_node)
8719

    
8720
      for old, new in zip(old_lvs, new_lvs):
8721
        new.logical_id = old.logical_id
8722
        self.cfg.SetDiskID(new, self.target_node)
8723

    
8724
      for disk in old_lvs:
8725
        disk.logical_id = ren_fn(disk, temp_suffix)
8726
        self.cfg.SetDiskID(disk, self.target_node)
8727

    
8728
      # Now that the new lvs have the old name, we can add them to the device
8729
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
8730
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
8731
                                                  new_lvs)
8732
      msg = result.fail_msg
8733
      if msg:
8734
        for new_lv in new_lvs:
8735
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
8736
                                               new_lv).fail_msg
8737
          if msg2:
8738
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
8739
                               hint=("cleanup manually the unused logical"
8740
                                     "volumes"))
8741
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
8742

    
8743
      dev.children = new_lvs
8744

    
8745
      self.cfg.Update(self.instance, feedback_fn)
8746

    
8747
    cstep = 5
8748
    if self.early_release:
8749
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
8750
      cstep += 1
8751
      self._RemoveOldStorage(self.target_node, iv_names)
8752
      # WARNING: we release both node locks here, do not do other RPCs
8753
      # than WaitForSync to the primary node
8754
      self._ReleaseNodeLock([self.target_node, self.other_node])
8755

    
8756
    # Wait for sync
8757
    # This can fail as the old devices are degraded and _WaitForSync
8758
    # does a combined result over all disks, so we don't check its return value
8759
    self.lu.LogStep(cstep, steps_total, "Sync devices")
8760
    cstep += 1
8761
    _WaitForSync(self.lu, self.instance)
8762

    
8763
    # Check all devices manually
8764
    self._CheckDevices(self.instance.primary_node, iv_names)
8765

    
8766
    # Step: remove old storage
8767
    if not self.early_release:
8768
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
8769
      cstep += 1
8770
      self._RemoveOldStorage(self.target_node, iv_names)
8771

    
8772
  def _ExecDrbd8Secondary(self, feedback_fn):
8773
    """Replace the secondary node for DRBD 8.
8774

8775
    The algorithm for replace is quite complicated:
8776
      - for all disks of the instance:
8777
        - create new LVs on the new node with same names
8778
        - shutdown the drbd device on the old secondary
8779
        - disconnect the drbd network on the primary
8780
        - create the drbd device on the new secondary
8781
        - network attach the drbd on the primary, using an artifice:
8782
          the drbd code for Attach() will connect to the network if it
8783
          finds a device which is connected to the good local disks but
8784
          not network enabled
8785
      - wait for sync across all devices
8786
      - remove all disks from the old secondary
8787

8788
    Failures are not very well handled.
8789

8790
    """
8791
    steps_total = 6
8792

    
8793
    # Step: check device activation
8794
    self.lu.LogStep(1, steps_total, "Check device existence")
8795
    self._CheckDisksExistence([self.instance.primary_node])
8796
    self._CheckVolumeGroup([self.instance.primary_node])
8797

    
8798
    # Step: check other node consistency
8799
    self.lu.LogStep(2, steps_total, "Check peer consistency")
8800
    self._CheckDisksConsistency(self.instance.primary_node, True, True)
8801

    
8802
    # Step: create new storage
8803
    self.lu.LogStep(3, steps_total, "Allocate new storage")
8804
    for idx, dev in enumerate(self.instance.disks):
8805
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
8806
                      (self.new_node, idx))
8807
      # we pass force_create=True to force LVM creation
8808
      for new_lv in dev.children:
8809
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
8810
                        _GetInstanceInfoText(self.instance), False)
8811

    
8812
    # Step 4: drbd minors and drbd setup changes
8813
    # after this, we must manually remove the drbd minors on both the
8814
    # error and the success paths
8815
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
8816
    minors = self.cfg.AllocateDRBDMinor([self.new_node
8817
                                         for dev in self.instance.disks],
8818
                                        self.instance.name)
8819
    logging.debug("Allocated minors %r", minors)
8820

    
8821
    iv_names = {}
8822
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
8823
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
8824
                      (self.new_node, idx))
8825
      # create new devices on new_node; note that we create two IDs:
8826
      # one without port, so the drbd will be activated without
8827
      # networking information on the new node at this stage, and one
8828
      # with network, for the latter activation in step 4
8829
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
8830
      if self.instance.primary_node == o_node1:
8831
        p_minor = o_minor1
8832
      else:
8833
        assert self.instance.primary_node == o_node2, "Three-node instance?"
8834
        p_minor = o_minor2
8835

    
8836
      new_alone_id = (self.instance.primary_node, self.new_node, None,
8837
                      p_minor, new_minor, o_secret)
8838
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
8839
                    p_minor, new_minor, o_secret)
8840

    
8841
      iv_names[idx] = (dev, dev.children, new_net_id)
8842
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
8843
                    new_net_id)
8844
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
8845
                              logical_id=new_alone_id,
8846
                              children=dev.children,
8847
                              size=dev.size)
8848
      try:
8849
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
8850
                              _GetInstanceInfoText(self.instance), False)
8851
      except errors.GenericError:
8852
        self.cfg.ReleaseDRBDMinors(self.instance.name)
8853
        raise
8854

    
8855
    # We have new devices, shutdown the drbd on the old secondary
8856
    for idx, dev in enumerate(self.instance.disks):
8857
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
8858
      self.cfg.SetDiskID(dev, self.target_node)
8859
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
8860
      if msg:
8861
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
8862
                           "node: %s" % (idx, msg),
8863
                           hint=("Please cleanup this device manually as"
8864
                                 " soon as possible"))
8865

    
8866
    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
8867
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
8868
                                               self.node_secondary_ip,
8869
                                               self.instance.disks)\
8870
                                              [self.instance.primary_node]
8871

    
8872
    msg = result.fail_msg
8873
    if msg:
8874
      # detaches didn't succeed (unlikely)
8875
      self.cfg.ReleaseDRBDMinors(self.instance.name)
8876
      raise errors.OpExecError("Can't detach the disks from the network on"
8877
                               " old node: %s" % (msg,))
8878

    
8879
    # if we managed to detach at least one, we update all the disks of
8880
    # the instance to point to the new secondary
8881
    self.lu.LogInfo("Updating instance configuration")
8882
    for dev, _, new_logical_id in iv_names.itervalues():
8883
      dev.logical_id = new_logical_id
8884
      self.cfg.SetDiskID(dev, self.instance.primary_node)
8885

    
8886
    self.cfg.Update(self.instance, feedback_fn)
8887

    
8888
    # and now perform the drbd attach
8889
    self.lu.LogInfo("Attaching primary drbds to new secondary"
8890
                    " (standalone => connected)")
8891
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
8892
                                            self.new_node],
8893
                                           self.node_secondary_ip,
8894
                                           self.instance.disks,
8895
                                           self.instance.name,
8896
                                           False)
8897
    for to_node, to_result in result.items():
8898
      msg = to_result.fail_msg
8899
      if msg:
8900
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
8901
                           to_node, msg,
8902
                           hint=("please do a gnt-instance info to see the"
8903
                                 " status of disks"))
8904
    cstep = 5
8905
    if self.early_release:
8906
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
8907
      cstep += 1
8908
      self._RemoveOldStorage(self.target_node, iv_names)
8909
      # WARNING: we release all node locks here, do not do other RPCs
8910
      # than WaitForSync to the primary node
8911
      self._ReleaseNodeLock([self.instance.primary_node,
8912
                             self.target_node,
8913
                             self.new_node])
8914

    
8915
    # Wait for sync
8916
    # This can fail as the old devices are degraded and _WaitForSync
8917
    # does a combined result over all disks, so we don't check its return value
8918
    self.lu.LogStep(cstep, steps_total, "Sync devices")
8919
    cstep += 1
8920
    _WaitForSync(self.lu, self.instance)
8921

    
8922
    # Check all devices manually
8923
    self._CheckDevices(self.instance.primary_node, iv_names)
8924

    
8925
    # Step: remove old storage
8926
    if not self.early_release:
8927
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
8928
      self._RemoveOldStorage(self.target_node, iv_names)


class LURepairNodeStorage(NoHooksLU):
  """Repairs the volume group on a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type

    if (constants.SO_FIX_CONSISTENCY not in
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " repaired" % storage_type,
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

  def _CheckFaultyDisks(self, instance, node_name):
    """Ensure faulty disks abort the opcode or at least warn."""
    try:
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
                                  node_name, True):
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
                                   " node '%s'" % (instance.name, node_name),
                                   errors.ECODE_STATE)
    except errors.OpPrereqError, err:
      if self.op.ignore_consistency:
        self.proc.LogWarning(str(err.args[0]))
      else:
        raise

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # Check whether any instance on this node has faulty disks
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
      if not inst.admin_up:
        continue
      check_nodes = set(inst.all_nodes)
      check_nodes.discard(self.op.node_name)
      for inst_node_name in check_nodes:
        self._CheckFaultyDisks(inst, inst_node_name)

  def Exec(self, feedback_fn):
    feedback_fn("Repairing storage unit '%s' on %s ..." %
                (self.op.name, self.op.node_name))

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_execute(self.op.node_name,
                                           self.op.storage_type, st_args,
                                           self.op.name,
                                           constants.SO_FIX_CONSISTENCY)
    result.Raise("Failed to repair storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))


class LUNodeEvacStrategy(NoHooksLU):
  """Computes the node evacuation strategy.

  """
  REQ_BGL = False

  def CheckArguments(self):
    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")

  def ExpandNames(self):
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
    self.needed_locks = locks = {}
    if self.op.remote_node is None:
      locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
      locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]

  def Exec(self, feedback_fn):
    if self.op.remote_node is not None:
      instances = []
      for node in self.op.nodes:
        instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
      result = []
      for i in instances:
        if i.primary_node == self.op.remote_node:
          raise errors.OpPrereqError("Node %s is the primary node of"
                                     " instance %s, cannot use it as"
                                     " secondary" %
                                     (self.op.remote_node, i.name),
                                     errors.ECODE_INVAL)
        result.append([i.name, self.op.remote_node])
    else:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=constants.IALLOCATOR_MODE_MEVAC,
                       evac_nodes=self.op.nodes)
      ial.Run(self.op.iallocator, validate=True)
      if not ial.success:
        raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
                                 errors.ECODE_NORES)
      result = ial.result
    return result
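
  # Illustrative sketch only: the value returned by LUNodeEvacStrategy.Exec
  # is a list of (instance, new secondary) pairs, e.g. (hypothetical names):
  #
  #   [["inst1.example.com", "node4.example.com"],
  #    ["inst2.example.com", "node4.example.com"]]
  #
  # When an iallocator is used instead of an explicit remote node, the list
  # is taken verbatim from the iallocator's result.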


class LUInstanceGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)

    self.instance = instance

    if instance.disk_template not in constants.DTS_GROWABLE:
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.", errors.ECODE_INVAL)

    self.disk = instance.FindDisk(self.op.disk)

    if instance.disk_template not in (constants.DT_FILE,
                                      constants.DT_SHARED_FILE):
      # TODO: check the free disk space for file, when that feature will be
      # supported
      _CheckNodesFreeDiskPerVG(self, nodenames,
                               self.disk.ComputeGrowth(self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = self.disk

    disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block device to grow")

    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      result.Raise("Grow request failed to node %s" % node)

      # TODO: Rewrite code to work properly
      # DRBD goes into sync mode for a short amount of time after executing the
      # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
      # calling "resize" in sync mode fails. Sleeping for a short amount of
      # time is a work-around.
      time.sleep(5)

    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance, feedback_fn)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance, disks=[disk])
      if disk_abort:
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")
      if not instance.admin_up:
        _SafeShutdownInstanceDisks(self, instance, disks=[disk])
    elif not instance.admin_up:
      self.proc.LogWarning("Not shutting down the disk even if the instance is"
                           " not supposed to be running because no wait for"
                           " sync mode was requested.")
9132

    
9133

    
9134
class LUInstanceQueryData(NoHooksLU):
9135
  """Query runtime instance data.
9136

9137
  """
9138
  REQ_BGL = False
9139

    
9140
  def ExpandNames(self):
9141
    self.needed_locks = {}
9142
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
9143

    
9144
    if self.op.instances:
9145
      self.wanted_names = []
9146
      for name in self.op.instances:
9147
        full_name = _ExpandInstanceName(self.cfg, name)
9148
        self.wanted_names.append(full_name)
9149
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
9150
    else:
9151
      self.wanted_names = None
9152
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
9153

    
9154
    self.needed_locks[locking.LEVEL_NODE] = []
9155
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
9156

    
9157
  def DeclareLocks(self, level):
9158
    if level == locking.LEVEL_NODE:
9159
      self._LockInstancesNodes()
9160

    
9161
  def CheckPrereq(self):
9162
    """Check prerequisites.
9163

9164
    This only checks the optional instance list against the existing names.
9165

9166
    """
9167
    if self.wanted_names is None:
9168
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
9169

    
9170
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
9171
                             in self.wanted_names]
9172

    
9173
  def _ComputeBlockdevStatus(self, node, instance_name, dev):
9174
    """Returns the status of a block device
9175

9176
    """
9177
    if self.op.static or not node:
9178
      return None
9179

    
9180
    self.cfg.SetDiskID(dev, node)
9181

    
9182
    result = self.rpc.call_blockdev_find(node, dev)
9183
    if result.offline:
9184
      return None
9185

    
9186
    result.Raise("Can't compute disk status for %s" % instance_name)
9187

    
9188
    status = result.payload
9189
    if status is None:
9190
      return None
9191

    
9192
    return (status.dev_path, status.major, status.minor,
9193
            status.sync_percent, status.estimated_time,
9194
            status.is_degraded, status.ldisk_status)
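
  # Illustrative sketch only: the tuple built above might look like the
  # following for a healthy DRBD device (all values hypothetical):
  #
  #   ("/dev/drbd0", 147, 0, None, None, False, constants.LDS_OKAY)
  #
  # corresponding to (dev_path, major, minor, sync_percent, estimated_time,
  # is_degraded, ldisk_status).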
9195

    
9196
  def _ComputeDiskStatus(self, instance, snode, dev):
9197
    """Compute block device status.
9198

9199
    """
9200
    if dev.dev_type in constants.LDS_DRBD:
9201
      # we change the snode then (otherwise we use the one passed in)
9202
      if dev.logical_id[0] == instance.primary_node:
9203
        snode = dev.logical_id[1]
9204
      else:
9205
        snode = dev.logical_id[0]
9206

    
9207
    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
9208
                                              instance.name, dev)
9209
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
9210

    
9211
    if dev.children:
9212
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
9213
                      for child in dev.children]
9214
    else:
9215
      dev_children = []
9216

    
9217
    data = {
9218
      "iv_name": dev.iv_name,
9219
      "dev_type": dev.dev_type,
9220
      "logical_id": dev.logical_id,
9221
      "physical_id": dev.physical_id,
9222
      "pstatus": dev_pstatus,
9223
      "sstatus": dev_sstatus,
9224
      "children": dev_children,
9225
      "mode": dev.mode,
9226
      "size": dev.size,
9227
      }
9228

    
9229
    return data
9230

    
9231
  def Exec(self, feedback_fn):
9232
    """Gather and return data"""
9233
    result = {}
9234

    
9235
    cluster = self.cfg.GetClusterInfo()
9236

    
9237
    for instance in self.wanted_instances:
9238
      if not self.op.static:
9239
        remote_info = self.rpc.call_instance_info(instance.primary_node,
9240
                                                  instance.name,
9241
                                                  instance.hypervisor)
9242
        remote_info.Raise("Error checking node %s" % instance.primary_node)
9243
        remote_info = remote_info.payload
9244
        if remote_info and "state" in remote_info:
9245
          remote_state = "up"
9246
        else:
9247
          remote_state = "down"
9248
      else:
9249
        remote_state = None
9250
      if instance.admin_up:
9251
        config_state = "up"
9252
      else:
9253
        config_state = "down"
9254

    
9255
      disks = [self._ComputeDiskStatus(instance, None, device)
9256
               for device in instance.disks]
9257

    
9258
      idict = {
9259
        "name": instance.name,
9260
        "config_state": config_state,
9261
        "run_state": remote_state,
9262
        "pnode": instance.primary_node,
9263
        "snodes": instance.secondary_nodes,
9264
        "os": instance.os,
9265
        # this happens to be the same format used for hooks
9266
        "nics": _NICListToTuple(self, instance.nics),
9267
        "disk_template": instance.disk_template,
9268
        "disks": disks,
9269
        "hypervisor": instance.hypervisor,
9270
        "network_port": instance.network_port,
9271
        "hv_instance": instance.hvparams,
9272
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
9273
        "be_instance": instance.beparams,
9274
        "be_actual": cluster.FillBE(instance),
9275
        "os_instance": instance.osparams,
9276
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
9277
        "serial_no": instance.serial_no,
9278
        "mtime": instance.mtime,
9279
        "ctime": instance.ctime,
9280
        "uuid": instance.uuid,
9281
        }
9282

    
9283
      result[instance.name] = idict
9284

    
9285
    return result
9286

    
9287

    
9288
class LUInstanceSetParams(LogicalUnit):
9289
  """Modifies an instances's parameters.
9290

9291
  """
9292
  HPATH = "instance-modify"
9293
  HTYPE = constants.HTYPE_INSTANCE
9294
  REQ_BGL = False
9295

    
9296
  def CheckArguments(self):
9297
    if not (self.op.nics or self.op.disks or self.op.disk_template or
9298
            self.op.hvparams or self.op.beparams or self.op.os_name):
9299
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
9300

    
9301
    if self.op.hvparams:
9302
      _CheckGlobalHvParams(self.op.hvparams)
9303

    
9304
    # Disk validation
9305
    disk_addremove = 0
9306
    for disk_op, disk_dict in self.op.disks:
9307
      utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
9308
      if disk_op == constants.DDM_REMOVE:
9309
        disk_addremove += 1
9310
        continue
9311
      elif disk_op == constants.DDM_ADD:
9312
        disk_addremove += 1
9313
      else:
9314
        if not isinstance(disk_op, int):
9315
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
9316
        if not isinstance(disk_dict, dict):
9317
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
9318
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
9319

    
9320
      if disk_op == constants.DDM_ADD:
9321
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
9322
        if mode not in constants.DISK_ACCESS_SET:
9323
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
9324
                                     errors.ECODE_INVAL)
9325
        size = disk_dict.get('size', None)
9326
        if size is None:
9327
          raise errors.OpPrereqError("Required disk parameter size missing",
9328
                                     errors.ECODE_INVAL)
9329
        try:
9330
          size = int(size)
9331
        except (TypeError, ValueError), err:
9332
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
9333
                                     str(err), errors.ECODE_INVAL)
9334
        disk_dict['size'] = size
9335
      else:
9336
        # modification of disk
9337
        if 'size' in disk_dict:
9338
          raise errors.OpPrereqError("Disk size change not possible, use"
9339
                                     " grow-disk", errors.ECODE_INVAL)
9340

    
9341
    if disk_addremove > 1:
9342
      raise errors.OpPrereqError("Only one disk add or remove operation"
9343
                                 " supported at a time", errors.ECODE_INVAL)
9344

    
9345
    if self.op.disks and self.op.disk_template is not None:
9346
      raise errors.OpPrereqError("Disk template conversion and other disk"
9347
                                 " changes not supported at the same time",
9348
                                 errors.ECODE_INVAL)
9349

    
9350
    if (self.op.disk_template and
9351
        self.op.disk_template in constants.DTS_INT_MIRROR and
9352
        self.op.remote_node is None):
9353
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
9354
                                 " one requires specifying a secondary node",
9355
                                 errors.ECODE_INVAL)
9356

    
9357
    # NIC validation
9358
    nic_addremove = 0
9359
    for nic_op, nic_dict in self.op.nics:
9360
      utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
9361
      if nic_op == constants.DDM_REMOVE:
9362
        nic_addremove += 1
9363
        continue
9364
      elif nic_op == constants.DDM_ADD:
9365
        nic_addremove += 1
9366
      else:
9367
        if not isinstance(nic_op, int):
9368
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
9369
        if not isinstance(nic_dict, dict):
9370
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
9371
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
9372

    
9373
      # nic_dict should be a dict
9374
      nic_ip = nic_dict.get('ip', None)
9375
      if nic_ip is not None:
9376
        if nic_ip.lower() == constants.VALUE_NONE:
9377
          nic_dict['ip'] = None
9378
        else:
9379
          if not netutils.IPAddress.IsValid(nic_ip):
9380
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
9381
                                       errors.ECODE_INVAL)
9382

    
9383
      nic_bridge = nic_dict.get('bridge', None)
9384
      nic_link = nic_dict.get('link', None)
9385
      if nic_bridge and nic_link:
9386
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
9387
                                   " at the same time", errors.ECODE_INVAL)
9388
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
9389
        nic_dict['bridge'] = None
9390
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
9391
        nic_dict['link'] = None
9392

    
9393
      if nic_op == constants.DDM_ADD:
9394
        nic_mac = nic_dict.get('mac', None)
9395
        if nic_mac is None:
9396
          nic_dict['mac'] = constants.VALUE_AUTO
9397

    
9398
      if 'mac' in nic_dict:
9399
        nic_mac = nic_dict['mac']
9400
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9401
          nic_mac = utils.NormalizeAndValidateMac(nic_mac)
9402

    
9403
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
9404
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
9405
                                     " modifying an existing nic",
9406
                                     errors.ECODE_INVAL)
9407

    
9408
    if nic_addremove > 1:
9409
      raise errors.OpPrereqError("Only one NIC add or remove operation"
9410
                                 " supported at a time", errors.ECODE_INVAL)
9411

    
9412
  def ExpandNames(self):
9413
    self._ExpandAndLockInstance()
9414
    self.needed_locks[locking.LEVEL_NODE] = []
9415
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
9416

    
9417
  def DeclareLocks(self, level):
9418
    if level == locking.LEVEL_NODE:
9419
      self._LockInstancesNodes()
9420
      if self.op.disk_template and self.op.remote_node:
9421
        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
9422
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
9423

    
9424
  def BuildHooksEnv(self):
9425
    """Build hooks env.
9426

9427
    This runs on the master, primary and secondaries.
9428

9429
    """
9430
    args = dict()
9431
    if constants.BE_MEMORY in self.be_new:
9432
      args['memory'] = self.be_new[constants.BE_MEMORY]
9433
    if constants.BE_VCPUS in self.be_new:
9434
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
9435
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
9436
    # information at all.
9437
    if self.op.nics:
9438
      args['nics'] = []
9439
      nic_override = dict(self.op.nics)
9440
      for idx, nic in enumerate(self.instance.nics):
9441
        if idx in nic_override:
9442
          this_nic_override = nic_override[idx]
9443
        else:
9444
          this_nic_override = {}
9445
        if 'ip' in this_nic_override:
9446
          ip = this_nic_override['ip']
9447
        else:
9448
          ip = nic.ip
9449
        if 'mac' in this_nic_override:
9450
          mac = this_nic_override['mac']
9451
        else:
9452
          mac = nic.mac
9453
        if idx in self.nic_pnew:
9454
          nicparams = self.nic_pnew[idx]
9455
        else:
9456
          nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
9457
        mode = nicparams[constants.NIC_MODE]
9458
        link = nicparams[constants.NIC_LINK]
9459
        args['nics'].append((ip, mac, mode, link))
9460
      if constants.DDM_ADD in nic_override:
9461
        ip = nic_override[constants.DDM_ADD].get('ip', None)
9462
        mac = nic_override[constants.DDM_ADD]['mac']
9463
        nicparams = self.nic_pnew[constants.DDM_ADD]
9464
        mode = nicparams[constants.NIC_MODE]
9465
        link = nicparams[constants.NIC_LINK]
9466
        args['nics'].append((ip, mac, mode, link))
9467
      elif constants.DDM_REMOVE in nic_override:
9468
        del args['nics'][-1]
9469

    
9470
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
9471
    if self.op.disk_template:
9472
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
9473
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
9474
    return env, nl, nl
9475

    
9476
  def CheckPrereq(self):
9477
    """Check prerequisites.
9478

9479
    This only checks the instance list against the existing names.
9480

9481
    """
9482
    # checking the new params on the primary/secondary nodes
9483

    
9484
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
9485
    cluster = self.cluster = self.cfg.GetClusterInfo()
9486
    assert self.instance is not None, \
9487
      "Cannot retrieve locked instance %s" % self.op.instance_name
9488
    pnode = instance.primary_node
9489
    nodelist = list(instance.all_nodes)
9490

    
9491
    # OS change
9492
    if self.op.os_name and not self.op.force:
9493
      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
9494
                      self.op.force_variant)
9495
      instance_os = self.op.os_name
9496
    else:
9497
      instance_os = instance.os
9498

    
9499
    if self.op.disk_template:
9500
      if instance.disk_template == self.op.disk_template:
9501
        raise errors.OpPrereqError("Instance already has disk template %s" %
9502
                                   instance.disk_template, errors.ECODE_INVAL)
9503

    
9504
      if (instance.disk_template,
9505
          self.op.disk_template) not in self._DISK_CONVERSIONS:
9506
        raise errors.OpPrereqError("Unsupported disk template conversion from"
9507
                                   " %s to %s" % (instance.disk_template,
9508
                                                  self.op.disk_template),
9509
                                   errors.ECODE_INVAL)
9510
      _CheckInstanceDown(self, instance, "cannot change disk template")
9511
      if self.op.disk_template in constants.DTS_INT_MIRROR:
9512
        if self.op.remote_node == pnode:
9513
          raise errors.OpPrereqError("Given new secondary node %s is the same"
9514
                                     " as the primary node of the instance" %
9515
                                     self.op.remote_node, errors.ECODE_STATE)
9516
        _CheckNodeOnline(self, self.op.remote_node)
9517
        _CheckNodeNotDrained(self, self.op.remote_node)
9518
        # FIXME: here we assume that the old instance type is DT_PLAIN
9519
        assert instance.disk_template == constants.DT_PLAIN
9520
        disks = [{"size": d.size, "vg": d.logical_id[0]}
9521
                 for d in instance.disks]
9522
        required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
9523
        _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
9524

    
9525
    # hvparams processing
9526
    if self.op.hvparams:
9527
      hv_type = instance.hypervisor
9528
      i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
9529
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
9530
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
9531

    
9532
      # local check
9533
      hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
9534
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
9535
      self.hv_new = hv_new # the new actual values
9536
      self.hv_inst = i_hvdict # the new dict (without defaults)
9537
    else:
9538
      self.hv_new = self.hv_inst = {}
9539

    
9540
    # beparams processing
9541
    if self.op.beparams:
9542
      i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
9543
                                   use_none=True)
9544
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
9545
      be_new = cluster.SimpleFillBE(i_bedict)
9546
      self.be_new = be_new # the new actual values
9547
      self.be_inst = i_bedict # the new dict (without defaults)
9548
    else:
9549
      self.be_new = self.be_inst = {}
9550

    
9551
    # osparams processing
9552
    if self.op.osparams:
9553
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
9554
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
9555
      self.os_inst = i_osdict # the new dict (without defaults)
9556
    else:
9557
      self.os_inst = {}
9558

    
9559
    self.warn = []
9560

    
9561
    if constants.BE_MEMORY in self.op.beparams and not self.op.force:
9562
      mem_check_list = [pnode]
9563
      if be_new[constants.BE_AUTO_BALANCE]:
9564
        # either we changed auto_balance to yes or it was already enabled
9565
        mem_check_list.extend(instance.secondary_nodes)
9566
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
9567
                                                  instance.hypervisor)
9568
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
9569
                                         instance.hypervisor)
9570
      pninfo = nodeinfo[pnode]
9571
      msg = pninfo.fail_msg
9572
      if msg:
9573
        # Assume the primary node is unreachable and go ahead
9574
        self.warn.append("Can't get info from primary node %s: %s" %
9575
                         (pnode,  msg))
9576
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
9577
        self.warn.append("Node data from primary node %s doesn't contain"
9578
                         " free memory information" % pnode)
9579
      elif instance_info.fail_msg:
9580
        self.warn.append("Can't get instance runtime information: %s" %
9581
                        instance_info.fail_msg)
9582
      else:
9583
        if instance_info.payload:
9584
          current_mem = int(instance_info.payload['memory'])
9585
        else:
9586
          # Assume instance not running
9587
          # (there is a slight race condition here, but it's not very probable,
9588
          # and we have no other way to check)
9589
          current_mem = 0
9590
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
9591
                    pninfo.payload['memory_free'])
9592
        if miss_mem > 0:
9593
          raise errors.OpPrereqError("This change will prevent the instance"
9594
                                     " from starting, due to %d MB of memory"
9595
                                     " missing on its primary node" % miss_mem,
9596
                                     errors.ECODE_NORES)
9597

    
9598
      if be_new[constants.BE_AUTO_BALANCE]:
9599
        for node, nres in nodeinfo.items():
9600
          if node not in instance.secondary_nodes:
9601
            continue
9602
          msg = nres.fail_msg
9603
          if msg:
9604
            self.warn.append("Can't get info from secondary node %s: %s" %
9605
                             (node, msg))
9606
          elif not isinstance(nres.payload.get('memory_free', None), int):
9607
            self.warn.append("Secondary node %s didn't return free"
9608
                             " memory information" % node)
9609
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
9610
            self.warn.append("Not enough memory to failover instance to"
9611
                             " secondary node %s" % node)
9612

    
9613
    # NIC processing
9614
    self.nic_pnew = {}
9615
    self.nic_pinst = {}
9616
    for nic_op, nic_dict in self.op.nics:
9617
      if nic_op == constants.DDM_REMOVE:
9618
        if not instance.nics:
9619
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
9620
                                     errors.ECODE_INVAL)
9621
        continue
9622
      if nic_op != constants.DDM_ADD:
9623
        # an existing nic
9624
        if not instance.nics:
9625
          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
9626
                                     " no NICs" % nic_op,
9627
                                     errors.ECODE_INVAL)
9628
        if nic_op < 0 or nic_op >= len(instance.nics):
9629
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
9630
                                     " are 0 to %d" %
9631
                                     (nic_op, len(instance.nics) - 1),
9632
                                     errors.ECODE_INVAL)
9633
        old_nic_params = instance.nics[nic_op].nicparams
9634
        old_nic_ip = instance.nics[nic_op].ip
9635
      else:
9636
        old_nic_params = {}
9637
        old_nic_ip = None
9638

    
9639
      update_params_dict = dict([(key, nic_dict[key])
9640
                                 for key in constants.NICS_PARAMETERS
9641
                                 if key in nic_dict])
9642

    
9643
      if 'bridge' in nic_dict:
9644
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
9645

    
9646
      new_nic_params = _GetUpdatedParams(old_nic_params,
9647
                                         update_params_dict)
9648
      utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
9649
      new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
9650
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
9651
      self.nic_pinst[nic_op] = new_nic_params
9652
      self.nic_pnew[nic_op] = new_filled_nic_params
9653
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
9654

    
9655
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
9656
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
9657
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
9658
        if msg:
9659
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
9660
          if self.op.force:
9661
            self.warn.append(msg)
9662
          else:
9663
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
9664
      if new_nic_mode == constants.NIC_MODE_ROUTED:
9665
        if 'ip' in nic_dict:
9666
          nic_ip = nic_dict['ip']
9667
        else:
9668
          nic_ip = old_nic_ip
9669
        if nic_ip is None:
9670
          raise errors.OpPrereqError('Cannot set the nic ip to None'
9671
                                     ' on a routed nic', errors.ECODE_INVAL)
9672
      if 'mac' in nic_dict:
9673
        nic_mac = nic_dict['mac']
9674
        if nic_mac is None:
9675
          raise errors.OpPrereqError('Cannot set the nic mac to None',
9676
                                     errors.ECODE_INVAL)
9677
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9678
          # otherwise generate the mac
9679
          nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
9680
        else:
9681
          # or validate/reserve the current one
9682
          try:
9683
            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
9684
          except errors.ReservationError:
9685
            raise errors.OpPrereqError("MAC address %s already in use"
9686
                                       " in cluster" % nic_mac,
9687
                                       errors.ECODE_NOTUNIQUE)
9688
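    # For reference, self.op.nics (and self.op.disks below) is a list of
    # (op, params) pairs, where op is either an existing index or one of
    # constants.DDM_ADD / constants.DDM_REMOVE.  A hypothetical opcode could
    # therefore carry:
    #
    #   nics=[(0, {"ip": "192.0.2.10"}),
    #         (constants.DDM_ADD, {"mac": constants.VALUE_AUTO})]
    #
    # which changes the IP of the first NIC and appends a new NIC with a
    # generated MAC address.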

    
9689
    # DISK processing
9690
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
9691
      raise errors.OpPrereqError("Disk operations not supported for"
9692
                                 " diskless instances",
9693
                                 errors.ECODE_INVAL)
9694
    for disk_op, _ in self.op.disks:
9695
      if disk_op == constants.DDM_REMOVE:
9696
        if len(instance.disks) == 1:
9697
          raise errors.OpPrereqError("Cannot remove the last disk of"
9698
                                     " an instance", errors.ECODE_INVAL)
9699
        _CheckInstanceDown(self, instance, "cannot remove disks")
9700

    
9701
      if (disk_op == constants.DDM_ADD and
9702
          len(instance.disks) >= constants.MAX_DISKS):
9703
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
9704
                                   " add more" % constants.MAX_DISKS,
9705
                                   errors.ECODE_STATE)
9706
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
9707
        # an existing disk
9708
        if disk_op < 0 or disk_op >= len(instance.disks):
9709
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
9710
                                     " are 0 to %d" %
9711
                                     (disk_op, len(instance.disks) - 1),
9712
                                     errors.ECODE_INVAL)
9713

    
9714
    return
9715

    
9716
  def _ConvertPlainToDrbd(self, feedback_fn):
9717
    """Converts an instance from plain to drbd.
9718

9719
    """
9720
    feedback_fn("Converting template to drbd")
9721
    instance = self.instance
9722
    pnode = instance.primary_node
9723
    snode = self.op.remote_node
9724

    
9725
    # create a fake disk info for _GenerateDiskTemplate
9726
    disk_info = [{"size": d.size, "mode": d.mode} for d in instance.disks]
9727
    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
9728
                                      instance.name, pnode, [snode],
9729
                                      disk_info, None, None, 0, feedback_fn)
9730
    info = _GetInstanceInfoText(instance)
9731
    feedback_fn("Creating aditional volumes...")
9732
    # first, create the missing data and meta devices
9733
    for disk in new_disks:
9734
      # unfortunately this is... not too nice
9735
      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
9736
                            info, True)
9737
      for child in disk.children:
9738
        _CreateSingleBlockDev(self, snode, instance, child, info, True)
9739
    # at this stage, all new LVs have been created, we can rename the
9740
    # old ones
9741
    feedback_fn("Renaming original volumes...")
9742
    rename_list = [(o, n.children[0].logical_id)
9743
                   for (o, n) in zip(instance.disks, new_disks)]
9744
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
9745
    result.Raise("Failed to rename original LVs")
9746

    
9747
    feedback_fn("Initializing DRBD devices...")
9748
    # all child devices are in place, we can now create the DRBD devices
9749
    for disk in new_disks:
9750
      for node in [pnode, snode]:
9751
        f_create = node == pnode
9752
        _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
9753

    
9754
    # at this point, the instance has been modified
9755
    instance.disk_template = constants.DT_DRBD8
9756
    instance.disks = new_disks
9757
    self.cfg.Update(instance, feedback_fn)
9758

    
9759
    # disks are created, waiting for sync
9760
    disk_abort = not _WaitForSync(self, instance)
9761
    if disk_abort:
9762
      raise errors.OpExecError("There are some degraded disks for"
9763
                               " this instance, please cleanup manually")
9764
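  # Rough sketch of the conversion above (illustrative, nothing here is
  # executed): for a plain disk, say "xenvg/disk0", the generated DRBD device
  # is a two-level tree
  #
  #   drbd8 (the new disk)
  #    +- children[0]: data LV  <- the renamed original LV
  #    +- children[1]: meta LV  <- freshly created
  #
  # which is why only the meta device (children[1]) is created on the primary
  # node, all child LVs are created on the new secondary, and the original
  # volumes are then renamed to the data-LV logical_ids.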

    
9765
  def _ConvertDrbdToPlain(self, feedback_fn):
9766
    """Converts an instance from drbd to plain.
9767

9768
    """
9769
    instance = self.instance
9770
    assert len(instance.secondary_nodes) == 1
9771
    pnode = instance.primary_node
9772
    snode = instance.secondary_nodes[0]
9773
    feedback_fn("Converting template to plain")
9774

    
9775
    old_disks = instance.disks
9776
    new_disks = [d.children[0] for d in old_disks]
9777

    
9778
    # copy over size and mode
9779
    for parent, child in zip(old_disks, new_disks):
9780
      child.size = parent.size
9781
      child.mode = parent.mode
9782

    
9783
    # update instance structure
9784
    instance.disks = new_disks
9785
    instance.disk_template = constants.DT_PLAIN
9786
    self.cfg.Update(instance, feedback_fn)
9787

    
9788
    feedback_fn("Removing volumes on the secondary node...")
9789
    for disk in old_disks:
9790
      self.cfg.SetDiskID(disk, snode)
9791
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
9792
      if msg:
9793
        self.LogWarning("Could not remove block device %s on node %s,"
9794
                        " continuing anyway: %s", disk.iv_name, snode, msg)
9795

    
9796
    feedback_fn("Removing unneeded volumes on the primary node...")
9797
    for idx, disk in enumerate(old_disks):
9798
      meta = disk.children[1]
9799
      self.cfg.SetDiskID(meta, pnode)
9800
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
9801
      if msg:
9802
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
9803
                        " continuing anyway: %s", idx, pnode, msg)
9804

    
9805
  def Exec(self, feedback_fn):
9806
    """Modifies an instance.
9807

9808
    All parameters take effect only at the next restart of the instance.
9809

9810
    """
9811
    # Process here the warnings from CheckPrereq, as we don't have a
9812
    # feedback_fn there.
9813
    for warn in self.warn:
9814
      feedback_fn("WARNING: %s" % warn)
9815

    
9816
    result = []
9817
    instance = self.instance
9818
    # disk changes
9819
    for disk_op, disk_dict in self.op.disks:
9820
      if disk_op == constants.DDM_REMOVE:
9821
        # remove the last disk
9822
        device = instance.disks.pop()
9823
        device_idx = len(instance.disks)
9824
        for node, disk in device.ComputeNodeTree(instance.primary_node):
9825
          self.cfg.SetDiskID(disk, node)
9826
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
9827
          if msg:
9828
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
9829
                            " continuing anyway", device_idx, node, msg)
9830
        result.append(("disk/%d" % device_idx, "remove"))
9831
      elif disk_op == constants.DDM_ADD:
9832
        # add a new disk
9833
        if instance.disk_template in (constants.DT_FILE,
9834
                                        constants.DT_SHARED_FILE):
9835
          file_driver, file_path = instance.disks[0].logical_id
9836
          file_path = os.path.dirname(file_path)
9837
        else:
9838
          file_driver = file_path = None
9839
        disk_idx_base = len(instance.disks)
9840
        new_disk = _GenerateDiskTemplate(self,
9841
                                         instance.disk_template,
9842
                                         instance.name, instance.primary_node,
9843
                                         instance.secondary_nodes,
9844
                                         [disk_dict],
9845
                                         file_path,
9846
                                         file_driver,
9847
                                         disk_idx_base, feedback_fn)[0]
9848
        instance.disks.append(new_disk)
9849
        info = _GetInstanceInfoText(instance)
9850

    
9851
        logging.info("Creating volume %s for instance %s",
9852
                     new_disk.iv_name, instance.name)
9853
        # Note: this needs to be kept in sync with _CreateDisks
9854
        #HARDCODE
9855
        for node in instance.all_nodes:
9856
          f_create = node == instance.primary_node
9857
          try:
9858
            _CreateBlockDev(self, node, instance, new_disk,
9859
                            f_create, info, f_create)
9860
          except errors.OpExecError, err:
9861
            self.LogWarning("Failed to create volume %s (%s) on"
9862
                            " node %s: %s",
9863
                            new_disk.iv_name, new_disk, node, err)
9864
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
9865
                       (new_disk.size, new_disk.mode)))
9866
      else:
9867
        # change a given disk
9868
        instance.disks[disk_op].mode = disk_dict['mode']
9869
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
9870

    
9871
    if self.op.disk_template:
9872
      r_shut = _ShutdownInstanceDisks(self, instance)
9873
      if not r_shut:
9874
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
9875
                                 " proceed with disk template conversion")
9876
      mode = (instance.disk_template, self.op.disk_template)
9877
      try:
9878
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
9879
      except:
9880
        self.cfg.ReleaseDRBDMinors(instance.name)
9881
        raise
9882
      result.append(("disk_template", self.op.disk_template))
9883

    
9884
    # NIC changes
9885
    for nic_op, nic_dict in self.op.nics:
9886
      if nic_op == constants.DDM_REMOVE:
9887
        # remove the last nic
9888
        del instance.nics[-1]
9889
        result.append(("nic.%d" % len(instance.nics), "remove"))
9890
      elif nic_op == constants.DDM_ADD:
9891
        # mac and bridge should be set by now
9892
        mac = nic_dict['mac']
9893
        ip = nic_dict.get('ip', None)
9894
        nicparams = self.nic_pinst[constants.DDM_ADD]
9895
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
9896
        instance.nics.append(new_nic)
9897
        result.append(("nic.%d" % (len(instance.nics) - 1),
9898
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
9899
                       (new_nic.mac, new_nic.ip,
9900
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
9901
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
9902
                       )))
9903
      else:
9904
        for key in 'mac', 'ip':
9905
          if key in nic_dict:
9906
            setattr(instance.nics[nic_op], key, nic_dict[key])
9907
        if nic_op in self.nic_pinst:
9908
          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
9909
        for key, val in nic_dict.iteritems():
9910
          result.append(("nic.%s/%d" % (key, nic_op), val))
9911

    
9912
    # hvparams changes
9913
    if self.op.hvparams:
9914
      instance.hvparams = self.hv_inst
9915
      for key, val in self.op.hvparams.iteritems():
9916
        result.append(("hv/%s" % key, val))
9917

    
9918
    # beparams changes
9919
    if self.op.beparams:
9920
      instance.beparams = self.be_inst
9921
      for key, val in self.op.beparams.iteritems():
9922
        result.append(("be/%s" % key, val))
9923

    
9924
    # OS change
9925
    if self.op.os_name:
9926
      instance.os = self.op.os_name
9927

    
9928
    # osparams changes
9929
    if self.op.osparams:
9930
      instance.osparams = self.os_inst
9931
      for key, val in self.op.osparams.iteritems():
9932
        result.append(("os/%s" % key, val))
9933

    
9934
    self.cfg.Update(instance, feedback_fn)
9935

    
9936
    return result
9937
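  # The change log returned by Exec is a list of (name, new value) pairs;
  # with made-up values, a run that added a disk, changed a NIC IP and raised
  # the memory limit might report:
  #
  #   [("disk/1", "add:size=1024,mode=rw"),
  #    ("nic.ip/0", "192.0.2.10"),
  #    ("be/memory", 2048)]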

    
9938
  _DISK_CONVERSIONS = {
9939
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
9940
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
9941
    }
9942
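  # _DISK_CONVERSIONS maps (old template, new template) pairs to the
  # conversion methods defined above; Exec() does a plain dictionary dispatch:
  #
  #   mode = (instance.disk_template, self.op.disk_template)
  #   self._DISK_CONVERSIONS[mode](self, feedback_fn)
  #
  # so supporting another conversion only requires a new _Convert* method and
  # an entry for its template pair here.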

    
9943

    
9944
class LUBackupQuery(NoHooksLU):
9945
  """Query the exports list
9946

9947
  """
9948
  REQ_BGL = False
9949

    
9950
  def ExpandNames(self):
9951
    self.needed_locks = {}
9952
    self.share_locks[locking.LEVEL_NODE] = 1
9953
    if not self.op.nodes:
9954
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9955
    else:
9956
      self.needed_locks[locking.LEVEL_NODE] = \
9957
        _GetWantedNodes(self, self.op.nodes)
9958

    
9959
  def Exec(self, feedback_fn):
9960
    """Compute the list of all the exported system images.
9961

9962
    @rtype: dict
9963
    @return: a dictionary with the structure node->(export-list)
9964
        where export-list is a list of the instances exported on
9965
        that node.
9966

9967
    """
9968
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
9969
    rpcresult = self.rpc.call_export_list(self.nodes)
9970
    result = {}
9971
    for node in rpcresult:
9972
      if rpcresult[node].fail_msg:
9973
        result[node] = False
9974
      else:
9975
        result[node] = rpcresult[node].payload
9976

    
9977
    return result
9978
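    # Example of the structure returned above (hypothetical names): nodes that
    # answered map to their export lists, nodes that failed map to False:
    #
    #   {"node1.example.com": ["inst1.example.com", "inst2.example.com"],
    #    "node2.example.com": False}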

    
9979

    
9980
class LUBackupPrepare(NoHooksLU):
9981
  """Prepares an instance for an export and returns useful information.
9982

9983
  """
9984
  REQ_BGL = False
9985

    
9986
  def ExpandNames(self):
9987
    self._ExpandAndLockInstance()
9988

    
9989
  def CheckPrereq(self):
9990
    """Check prerequisites.
9991

9992
    """
9993
    instance_name = self.op.instance_name
9994

    
9995
    self.instance = self.cfg.GetInstanceInfo(instance_name)
9996
    assert self.instance is not None, \
9997
          "Cannot retrieve locked instance %s" % self.op.instance_name
9998
    _CheckNodeOnline(self, self.instance.primary_node)
9999

    
10000
    self._cds = _GetClusterDomainSecret()
10001

    
10002
  def Exec(self, feedback_fn):
10003
    """Prepares an instance for an export.
10004

10005
    """
10006
    instance = self.instance
10007

    
10008
    if self.op.mode == constants.EXPORT_MODE_REMOTE:
10009
      salt = utils.GenerateSecret(8)
10010

    
10011
      feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
10012
      result = self.rpc.call_x509_cert_create(instance.primary_node,
10013
                                              constants.RIE_CERT_VALIDITY)
10014
      result.Raise("Can't create X509 key and certificate on %s" % result.node)
10015

    
10016
      (name, cert_pem) = result.payload
10017

    
10018
      cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
10019
                                             cert_pem)
10020

    
10021
      return {
10022
        "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
10023
        "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
10024
                          salt),
10025
        "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
10026
        }
10027

    
10028
    return None
10029
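  # Minimal sketch of how the remote-export data returned above is consumed
  # (values are placeholders): the caller passes "x509_key_name" unchanged to
  # OpBackupExport, whose CheckPrereq re-verifies it against the cluster
  # domain secret:
  #
  #   (key_name, hmac_digest, hmac_salt) = x509_key_name
  #   if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
  #     raise errors.OpPrereqError("HMAC for X509 key name is wrong", ...)
  #
  # so a key name generated against a different cluster domain secret, or one
  # tampered with in transit, is rejected before any disk data is moved.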

    
10030

    
10031
class LUBackupExport(LogicalUnit):
10032
  """Export an instance to an image in the cluster.
10033

10034
  """
10035
  HPATH = "instance-export"
10036
  HTYPE = constants.HTYPE_INSTANCE
10037
  REQ_BGL = False
10038

    
10039
  def CheckArguments(self):
10040
    """Check the arguments.
10041

10042
    """
10043
    self.x509_key_name = self.op.x509_key_name
10044
    self.dest_x509_ca_pem = self.op.destination_x509_ca
10045

    
10046
    if self.op.mode == constants.EXPORT_MODE_REMOTE:
10047
      if not self.x509_key_name:
10048
        raise errors.OpPrereqError("Missing X509 key name for encryption",
10049
                                   errors.ECODE_INVAL)
10050

    
10051
      if not self.dest_x509_ca_pem:
10052
        raise errors.OpPrereqError("Missing destination X509 CA",
10053
                                   errors.ECODE_INVAL)
10054

    
10055
  def ExpandNames(self):
10056
    self._ExpandAndLockInstance()
10057

    
10058
    # Lock all nodes for local exports
10059
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
10060
      # FIXME: lock only instance primary and destination node
10061
      #
10062
      # Sad but true, for now we have to lock all nodes, as we don't know where
10063
      # the previous export might be, and in this LU we search for it and
10064
      # remove it from its current node. In the future we could fix this by:
10065
      #  - making a tasklet to search (share-lock all), then create the
10066
      #    new one, then one to remove, after
10067
      #  - removing the removal operation altogether
10068
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
10069

    
10070
  def DeclareLocks(self, level):
10071
    """Last minute lock declaration."""
10072
    # All nodes are locked anyway, so nothing to do here.
10073

    
10074
  def BuildHooksEnv(self):
10075
    """Build hooks env.
10076

10077
    This will run on the master, primary node and target node.
10078

10079
    """
10080
    env = {
10081
      "EXPORT_MODE": self.op.mode,
10082
      "EXPORT_NODE": self.op.target_node,
10083
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
10084
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
10085
      # TODO: Generic function for boolean env variables
10086
      "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
10087
      }
10088

    
10089
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
10090

    
10091
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
10092

    
10093
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
10094
      nl.append(self.op.target_node)
10095

    
10096
    return env, nl, nl
10097

    
10098
  def CheckPrereq(self):
10099
    """Check prerequisites.
10100

10101
    This checks that the instance and node names are valid.
10102

10103
    """
10104
    instance_name = self.op.instance_name
10105

    
10106
    self.instance = self.cfg.GetInstanceInfo(instance_name)
10107
    assert self.instance is not None, \
10108
          "Cannot retrieve locked instance %s" % self.op.instance_name
10109
    _CheckNodeOnline(self, self.instance.primary_node)
10110

    
10111
    if (self.op.remove_instance and self.instance.admin_up and
10112
        not self.op.shutdown):
10113
      raise errors.OpPrereqError("Can not remove instance without shutting it"
10114
                                 " down before")
10115

    
10116
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
10117
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
10118
      self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
10119
      assert self.dst_node is not None
10120

    
10121
      _CheckNodeOnline(self, self.dst_node.name)
10122
      _CheckNodeNotDrained(self, self.dst_node.name)
10123

    
10124
      self._cds = None
10125
      self.dest_disk_info = None
10126
      self.dest_x509_ca = None
10127

    
10128
    elif self.op.mode == constants.EXPORT_MODE_REMOTE:
10129
      self.dst_node = None
10130

    
10131
      if len(self.op.target_node) != len(self.instance.disks):
10132
        raise errors.OpPrereqError(("Received destination information for %s"
10133
                                    " disks, but instance %s has %s disks") %
10134
                                   (len(self.op.target_node), instance_name,
10135
                                    len(self.instance.disks)),
10136
                                   errors.ECODE_INVAL)
10137

    
10138
      cds = _GetClusterDomainSecret()
10139

    
10140
      # Check X509 key name
10141
      try:
10142
        (key_name, hmac_digest, hmac_salt) = self.x509_key_name
10143
      except (TypeError, ValueError), err:
10144
        raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)
10145

    
10146
      if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
10147
        raise errors.OpPrereqError("HMAC for X509 key name is wrong",
10148
                                   errors.ECODE_INVAL)
10149

    
10150
      # Load and verify CA
10151
      try:
10152
        (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
10153
      except OpenSSL.crypto.Error, err:
10154
        raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
10155
                                   (err, ), errors.ECODE_INVAL)
10156

    
10157
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
10158
      if errcode is not None:
10159
        raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
10160
                                   (msg, ), errors.ECODE_INVAL)
10161

    
10162
      self.dest_x509_ca = cert
10163

    
10164
      # Verify target information
10165
      disk_info = []
10166
      for idx, disk_data in enumerate(self.op.target_node):
10167
        try:
10168
          (host, port, magic) = \
10169
            masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
10170
        except errors.GenericError, err:
10171
          raise errors.OpPrereqError("Target info for disk %s: %s" %
10172
                                     (idx, err), errors.ECODE_INVAL)
10173

    
10174
        disk_info.append((host, port, magic))
10175

    
10176
      assert len(disk_info) == len(self.op.target_node)
10177
      self.dest_disk_info = disk_info
10178

    
10179
    else:
10180
      raise errors.ProgrammerError("Unhandled export mode %r" %
10181
                                   self.op.mode)
10182

    
10183
    # instance disk type verification
10184
    # TODO: Implement export support for file-based disks
10185
    for disk in self.instance.disks:
10186
      if disk.dev_type == constants.LD_FILE:
10187
        raise errors.OpPrereqError("Export not supported for instances with"
10188
                                   " file-based disks", errors.ECODE_INVAL)
10189

    
10190
  def _CleanupExports(self, feedback_fn):
10191
    """Removes exports of current instance from all other nodes.
10192

10193
    If an instance in a cluster with nodes A..D was exported to node C, its
10194
    exports will be removed from the nodes A, B and D.
10195

10196
    """
10197
    assert self.op.mode != constants.EXPORT_MODE_REMOTE
10198

    
10199
    nodelist = self.cfg.GetNodeList()
10200
    nodelist.remove(self.dst_node.name)
10201

    
10202
    # on one-node clusters nodelist will be empty after the removal
10203
    # if we proceeded, the backup would be removed because OpBackupQuery
10204
    # substitutes an empty list with the full cluster node list.
10205
    iname = self.instance.name
10206
    if nodelist:
10207
      feedback_fn("Removing old exports for instance %s" % iname)
10208
      exportlist = self.rpc.call_export_list(nodelist)
10209
      for node in exportlist:
10210
        if exportlist[node].fail_msg:
10211
          continue
10212
        if iname in exportlist[node].payload:
10213
          msg = self.rpc.call_export_remove(node, iname).fail_msg
10214
          if msg:
10215
            self.LogWarning("Could not remove older export for instance %s"
10216
                            " on node %s: %s", iname, node, msg)
10217

    
10218
  def Exec(self, feedback_fn):
10219
    """Export an instance to an image in the cluster.
10220

10221
    """
10222
    assert self.op.mode in constants.EXPORT_MODES
10223

    
10224
    instance = self.instance
10225
    src_node = instance.primary_node
10226

    
10227
    if self.op.shutdown:
10228
      # shut down the instance, but not the disks
10229
      feedback_fn("Shutting down instance %s" % instance.name)
10230
      result = self.rpc.call_instance_shutdown(src_node, instance,
10231
                                               self.op.shutdown_timeout)
10232
      # TODO: Maybe ignore failures if ignore_remove_failures is set
10233
      result.Raise("Could not shutdown instance %s on"
10234
                   " node %s" % (instance.name, src_node))
10235

    
10236
    # set the disks ID correctly since call_instance_start needs the
10237
    # correct drbd minor to create the symlinks
10238
    for disk in instance.disks:
10239
      self.cfg.SetDiskID(disk, src_node)
10240

    
10241
    activate_disks = (not instance.admin_up)
10242

    
10243
    if activate_disks:
10244
      # Activate the instance disks if we're exporting a stopped instance
10245
      feedback_fn("Activating disks for %s" % instance.name)
10246
      _StartInstanceDisks(self, instance, None)
10247

    
10248
    try:
10249
      helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
10250
                                                     instance)
10251

    
10252
      helper.CreateSnapshots()
10253
      try:
10254
        if (self.op.shutdown and instance.admin_up and
10255
            not self.op.remove_instance):
10256
          assert not activate_disks
10257
          feedback_fn("Starting instance %s" % instance.name)
10258
          result = self.rpc.call_instance_start(src_node, instance, None, None)
10259
          msg = result.fail_msg
10260
          if msg:
10261
            feedback_fn("Failed to start instance: %s" % msg)
10262
            _ShutdownInstanceDisks(self, instance)
10263
            raise errors.OpExecError("Could not start instance: %s" % msg)
10264

    
10265
        if self.op.mode == constants.EXPORT_MODE_LOCAL:
10266
          (fin_resu, dresults) = helper.LocalExport(self.dst_node)
10267
        elif self.op.mode == constants.EXPORT_MODE_REMOTE:
10268
          connect_timeout = constants.RIE_CONNECT_TIMEOUT
10269
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
10270

    
10271
          (key_name, _, _) = self.x509_key_name
10272

    
10273
          dest_ca_pem = \
10274
            OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
10275
                                            self.dest_x509_ca)
10276

    
10277
          (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
10278
                                                     key_name, dest_ca_pem,
10279
                                                     timeouts)
10280
      finally:
10281
        helper.Cleanup()
10282

    
10283
      # Check for backwards compatibility
10284
      assert len(dresults) == len(instance.disks)
10285
      assert compat.all(isinstance(i, bool) for i in dresults), \
10286
             "Not all results are boolean: %r" % dresults
10287

    
10288
    finally:
10289
      if activate_disks:
10290
        feedback_fn("Deactivating disks for %s" % instance.name)
10291
        _ShutdownInstanceDisks(self, instance)
10292

    
10293
    if not (compat.all(dresults) and fin_resu):
10294
      failures = []
10295
      if not fin_resu:
10296
        failures.append("export finalization")
10297
      if not compat.all(dresults):
10298
        fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
10299
                               if not dsk)
10300
        failures.append("disk export: disk(s) %s" % fdsk)
10301

    
10302
      raise errors.OpExecError("Export failed, errors in %s" %
10303
                               utils.CommaJoin(failures))
10304

    
10305
    # At this point, the export was successful, we can cleanup/finish
10306

    
10307
    # Remove instance if requested
10308
    if self.op.remove_instance:
10309
      feedback_fn("Removing instance %s" % instance.name)
10310
      _RemoveInstance(self, feedback_fn, instance,
10311
                      self.op.ignore_remove_failures)
10312

    
10313
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
10314
      self._CleanupExports(feedback_fn)
10315

    
10316
    return fin_resu, dresults
10317

    
10318

    
10319
class LUBackupRemove(NoHooksLU):
10320
  """Remove exports related to the named instance.
10321

10322
  """
10323
  REQ_BGL = False
10324

    
10325
  def ExpandNames(self):
10326
    self.needed_locks = {}
10327
    # We need all nodes to be locked in order for RemoveExport to work, but we
10328
    # don't need to lock the instance itself, as nothing will happen to it (and
10329
    # we can remove exports also for a removed instance)
10330
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
10331

    
10332
  def Exec(self, feedback_fn):
10333
    """Remove any export.
10334

10335
    """
10336
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
10337
    # If the instance was not found we'll try with the name that was passed in.
10338
    # This will only work if it was an FQDN, though.
10339
    fqdn_warn = False
10340
    if not instance_name:
10341
      fqdn_warn = True
10342
      instance_name = self.op.instance_name
10343

    
10344
    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
10345
    exportlist = self.rpc.call_export_list(locked_nodes)
10346
    found = False
10347
    for node in exportlist:
10348
      msg = exportlist[node].fail_msg
10349
      if msg:
10350
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
10351
        continue
10352
      if instance_name in exportlist[node].payload:
10353
        found = True
10354
        result = self.rpc.call_export_remove(node, instance_name)
10355
        msg = result.fail_msg
10356
        if msg:
10357
          logging.error("Could not remove export for instance %s"
10358
                        " on node %s: %s", instance_name, node, msg)
10359

    
10360
    if fqdn_warn and not found:
10361
      feedback_fn("Export not found. If trying to remove an export belonging"
10362
                  " to a deleted instance please use its Fully Qualified"
10363
                  " Domain Name.")
10364

    
10365

    
10366
class LUGroupAdd(LogicalUnit):
10367
  """Logical unit for creating node groups.
10368

10369
  """
10370
  HPATH = "group-add"
10371
  HTYPE = constants.HTYPE_GROUP
10372
  REQ_BGL = False
10373

    
10374
  def ExpandNames(self):
10375
    # We need the new group's UUID here so that we can create and acquire the
10376
    # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
10377
    # that it should not check whether the UUID exists in the configuration.
10378
    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
10379
    self.needed_locks = {}
10380
    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
10381

    
10382
  def CheckPrereq(self):
10383
    """Check prerequisites.
10384

10385
    This checks that the given group name is not an existing node group
10386
    already.
10387

10388
    """
10389
    try:
10390
      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10391
    except errors.OpPrereqError:
10392
      pass
10393
    else:
10394
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
10395
                                 " node group (UUID: %s)" %
10396
                                 (self.op.group_name, existing_uuid),
10397
                                 errors.ECODE_EXISTS)
10398

    
10399
    if self.op.ndparams:
10400
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
10401

    
10402
  def BuildHooksEnv(self):
10403
    """Build hooks env.
10404

10405
    """
10406
    env = {
10407
      "GROUP_NAME": self.op.group_name,
10408
      }
10409
    mn = self.cfg.GetMasterNode()
10410
    return env, [mn], [mn]
10411

    
10412
  def Exec(self, feedback_fn):
10413
    """Add the node group to the cluster.
10414

10415
    """
10416
    group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
10417
                                  uuid=self.group_uuid,
10418
                                  alloc_policy=self.op.alloc_policy,
10419
                                  ndparams=self.op.ndparams)
10420

    
10421
    self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
10422
    del self.remove_locks[locking.LEVEL_NODEGROUP]
10423

    
10424

    
10425
class LUGroupAssignNodes(NoHooksLU):
10426
  """Logical unit for assigning nodes to groups.
10427

10428
  """
10429
  REQ_BGL = False
10430

    
10431
  def ExpandNames(self):
10432
    # These raise errors.OpPrereqError on their own:
10433
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10434
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
10435

    
10436
    # We want to lock all the affected nodes and groups. We have readily
10437
    # available the list of nodes, and the *destination* group. To gather the
10438
    # list of "source" groups, we need to fetch node information.
10439
    self.node_data = self.cfg.GetAllNodesInfo()
10440
    affected_groups = set(self.node_data[node].group for node in self.op.nodes)
10441
    affected_groups.add(self.group_uuid)
10442

    
10443
    self.needed_locks = {
10444
      locking.LEVEL_NODEGROUP: list(affected_groups),
10445
      locking.LEVEL_NODE: self.op.nodes,
10446
      }
10447

    
10448
  def CheckPrereq(self):
10449
    """Check prerequisites.
10450

10451
    """
10452
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
10453
    instance_data = self.cfg.GetAllInstancesInfo()
10454

    
10455
    if self.group is None:
10456
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
10457
                               (self.op.group_name, self.group_uuid))
10458

    
10459
    (new_splits, previous_splits) = \
10460
      self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
10461
                                             for node in self.op.nodes],
10462
                                            self.node_data, instance_data)
10463

    
10464
    if new_splits:
10465
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
10466

    
10467
      if not self.op.force:
10468
        raise errors.OpExecError("The following instances get split by this"
10469
                                 " change and --force was not given: %s" %
10470
                                 fmt_new_splits)
10471
      else:
10472
        self.LogWarning("This operation will split the following instances: %s",
10473
                        fmt_new_splits)
10474

    
10475
        if previous_splits:
10476
          self.LogWarning("In addition, these already-split instances continue"
10477
                          " to be spit across groups: %s",
10478
                          utils.CommaJoin(utils.NiceSort(previous_splits)))
10479

    
10480
  def Exec(self, feedback_fn):
10481
    """Assign nodes to a new group.
10482

10483
    """
10484
    for node in self.op.nodes:
10485
      self.node_data[node].group = self.group_uuid
10486

    
10487
    self.cfg.Update(self.group, feedback_fn) # Saves all modified nodes.
10488

    
10489
  @staticmethod
10490
  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
10491
    """Check for split instances after a node assignment.
10492

10493
    This method considers a series of node assignments as an atomic operation,
10494
    and returns information about split instances after applying the set of
10495
    changes.
10496

10497
    In particular, it returns information about newly split instances, and
10498
    instances that were already split, and remain so after the change.
10499

10500
    Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
10501
    considered.
10502

10503
    @type changes: list of (node_name, new_group_uuid) pairs.
10504
    @param changes: list of node assignments to consider.
10505
    @param node_data: a dict with data for all nodes
10506
    @param instance_data: a dict with all instances to consider
10507
    @rtype: a two-tuple
10508
    @return: a list of instances that were previously okay and end up split as a
10509
      consequence of this change, and a list of instances that were previously
10510
      split and this change does not fix.
10511

10512
    """
10513
    changed_nodes = dict((node, group) for node, group in changes
10514
                         if node_data[node].group != group)
10515

    
10516
    all_split_instances = set()
10517
    previously_split_instances = set()
10518

    
10519
    def InstanceNodes(instance):
10520
      return [instance.primary_node] + list(instance.secondary_nodes)
10521

    
10522
    for inst in instance_data.values():
10523
      if inst.disk_template not in constants.DTS_INT_MIRROR:
10524
        continue
10525

    
10526
      instance_nodes = InstanceNodes(inst)
10527

    
10528
      if len(set(node_data[node].group for node in instance_nodes)) > 1:
10529
        previously_split_instances.add(inst.name)
10530

    
10531
      if len(set(changed_nodes.get(node, node_data[node].group)
10532
                 for node in instance_nodes)) > 1:
10533
        all_split_instances.add(inst.name)
10534

    
10535
    return (list(all_split_instances - previously_split_instances),
10536
            list(previously_split_instances & all_split_instances))
10537
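  # Worked example (hypothetical cluster): node1 is in group A, node2 in
  # group B, and the DRBD instance "inst1" spans both, so it is already split.
  #
  #   CheckAssignmentForSplitInstances([("node2", "A")], node_data, inst_data)
  #     -> ([], [])          # the move heals the split, nothing to report
  #   CheckAssignmentForSplitInstances([("node1", "C")], node_data, inst_data)
  #     -> ([], ["inst1"])   # still split, reported as previously split
  #
  # Moving one node of a non-split instance into another group would instead
  # return (["inst1"], []), i.e. a newly split instance.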

    
10538

    
10539
class _GroupQuery(_QueryBase):
10540
  FIELDS = query.GROUP_FIELDS
10541

    
10542
  def ExpandNames(self, lu):
10543
    lu.needed_locks = {}
10544

    
10545
    self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
10546
    name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
10547

    
10548
    if not self.names:
10549
      self.wanted = [name_to_uuid[name]
10550
                     for name in utils.NiceSort(name_to_uuid.keys())]
10551
    else:
10552
      # Accept names to be either names or UUIDs.
10553
      missing = []
10554
      self.wanted = []
10555
      all_uuid = frozenset(self._all_groups.keys())
10556

    
10557
      for name in self.names:
10558
        if name in all_uuid:
10559
          self.wanted.append(name)
10560
        elif name in name_to_uuid:
10561
          self.wanted.append(name_to_uuid[name])
10562
        else:
10563
          missing.append(name)
10564

    
10565
      if missing:
10566
        raise errors.OpPrereqError("Some groups do not exist: %s" % missing,
10567
                                   errors.ECODE_NOENT)
10568

    
10569
  def DeclareLocks(self, lu, level):
10570
    pass
10571

    
10572
  def _GetQueryData(self, lu):
10573
    """Computes the list of node groups and their attributes.
10574

10575
    """
10576
    do_nodes = query.GQ_NODE in self.requested_data
10577
    do_instances = query.GQ_INST in self.requested_data
10578

    
10579
    group_to_nodes = None
10580
    group_to_instances = None
10581

    
10582
    # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
10583
    # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
10584
    # latter GetAllInstancesInfo() is not enough, for we have to go through
10585
    # instance->node. Hence, we will need to process nodes even if we only need
10586
    # instance information.
10587
    if do_nodes or do_instances:
10588
      all_nodes = lu.cfg.GetAllNodesInfo()
10589
      group_to_nodes = dict((uuid, []) for uuid in self.wanted)
10590
      node_to_group = {}
10591

    
10592
      for node in all_nodes.values():
10593
        if node.group in group_to_nodes:
10594
          group_to_nodes[node.group].append(node.name)
10595
          node_to_group[node.name] = node.group
10596

    
10597
      if do_instances:
10598
        all_instances = lu.cfg.GetAllInstancesInfo()
10599
        group_to_instances = dict((uuid, []) for uuid in self.wanted)
10600

    
10601
        for instance in all_instances.values():
10602
          node = instance.primary_node
10603
          if node in node_to_group:
10604
            group_to_instances[node_to_group[node]].append(instance.name)
10605

    
10606
        if not do_nodes:
10607
          # Do not pass on node information if it was not requested.
10608
          group_to_nodes = None
10609

    
10610
    return query.GroupQueryData([self._all_groups[uuid]
10611
                                 for uuid in self.wanted],
10612
                                group_to_nodes, group_to_instances)
10613
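    # Illustrative shape of the mappings built above (made-up names): for a
    # single requested group "uuid-1" containing nodes n1 and n2, with
    # instance i1 primary on n1,
    #
    #   group_to_nodes     = {"uuid-1": ["n1", "n2"]}
    #   node_to_group      = {"n1": "uuid-1", "n2": "uuid-1"}
    #   group_to_instances = {"uuid-1": ["i1"]}
    #
    # and group_to_nodes is reset to None if node data was not actually
    # requested (GQ_NODE not in requested_data).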

    
10614

    
10615
class LUGroupQuery(NoHooksLU):
10616
  """Logical unit for querying node groups.
10617

10618
  """
10619
  REQ_BGL = False
10620

    
10621
  def CheckArguments(self):
10622
    self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
10623
                          self.op.output_fields, False)
10624

    
10625
  def ExpandNames(self):
10626
    self.gq.ExpandNames(self)
10627

    
10628
  def Exec(self, feedback_fn):
10629
    return self.gq.OldStyleQuery(self)
10630

    
10631

    
10632
class LUGroupSetParams(LogicalUnit):
10633
  """Modifies the parameters of a node group.
10634

10635
  """
10636
  HPATH = "group-modify"
10637
  HTYPE = constants.HTYPE_GROUP
10638
  REQ_BGL = False
10639

    
10640
  def CheckArguments(self):
10641
    all_changes = [
10642
      self.op.ndparams,
10643
      self.op.alloc_policy,
10644
      ]
10645

    
10646
    if all_changes.count(None) == len(all_changes):
10647
      raise errors.OpPrereqError("Please pass at least one modification",
10648
                                 errors.ECODE_INVAL)
10649

    
10650
  def ExpandNames(self):
10651
    # This raises errors.OpPrereqError on its own:
10652
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10653

    
10654
    self.needed_locks = {
10655
      locking.LEVEL_NODEGROUP: [self.group_uuid],
10656
      }
10657

    
10658
  def CheckPrereq(self):
10659
    """Check prerequisites.
10660

10661
    """
10662
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
10663

    
10664
    if self.group is None:
10665
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
10666
                               (self.op.group_name, self.group_uuid))
10667

    
10668
    if self.op.ndparams:
10669
      new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
10670
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
10671
      self.new_ndparams = new_ndparams
10672

    
10673
  def BuildHooksEnv(self):
10674
    """Build hooks env.
10675

10676
    """
10677
    env = {
10678
      "GROUP_NAME": self.op.group_name,
10679
      "NEW_ALLOC_POLICY": self.op.alloc_policy,
10680
      }
10681
    mn = self.cfg.GetMasterNode()
10682
    return env, [mn], [mn]
10683

    
10684
  def Exec(self, feedback_fn):
10685
    """Modifies the node group.
10686

10687
    """
10688
    result = []
10689

    
10690
    if self.op.ndparams:
10691
      self.group.ndparams = self.new_ndparams
10692
      result.append(("ndparams", str(self.group.ndparams)))
10693

    
10694
    if self.op.alloc_policy:
10695
      self.group.alloc_policy = self.op.alloc_policy
10696

    
10697
    self.cfg.Update(self.group, feedback_fn)
10698
    return result
10699

    
10700

    
10701

    
10702
class LUGroupRemove(LogicalUnit):
10703
  HPATH = "group-remove"
10704
  HTYPE = constants.HTYPE_GROUP
10705
  REQ_BGL = False
10706

    
10707
  def ExpandNames(self):
10708
    # This raises errors.OpPrereqError on its own:
10709
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10710
    self.needed_locks = {
10711
      locking.LEVEL_NODEGROUP: [self.group_uuid],
10712
      }
10713

    
10714
  def CheckPrereq(self):
10715
    """Check prerequisites.
10716

10717
    This checks that the given group name exists as a node group, that it is
10718
    empty (i.e., contains no nodes), and that it is not the last group of the
10719
    cluster.
10720

10721
    """
10722
    # Verify that the group is empty.
10723
    group_nodes = [node.name
10724
                   for node in self.cfg.GetAllNodesInfo().values()
10725
                   if node.group == self.group_uuid]
10726

    
10727
    if group_nodes:
10728
      raise errors.OpPrereqError("Group '%s' not empty, has the following"
10729
                                 " nodes: %s" %
10730
                                 (self.op.group_name,
10731
                                  utils.CommaJoin(utils.NiceSort(group_nodes))),
10732
                                 errors.ECODE_STATE)
10733

    
10734
    # Verify the cluster would not be left group-less.
10735
    if len(self.cfg.GetNodeGroupList()) == 1:
10736
      raise errors.OpPrereqError("Group '%s' is the only group,"
10737
                                 " cannot be removed" %
10738
                                 self.op.group_name,
10739
                                 errors.ECODE_STATE)
10740

    
10741
  def BuildHooksEnv(self):
10742
    """Build hooks env.
10743

10744
    """
10745
    env = {
10746
      "GROUP_NAME": self.op.group_name,
10747
      }
10748
    mn = self.cfg.GetMasterNode()
10749
    return env, [mn], [mn]
10750

    
10751
  def Exec(self, feedback_fn):
10752
    """Remove the node group.
10753

10754
    """
10755
    try:
10756
      self.cfg.RemoveNodeGroup(self.group_uuid)
10757
    except errors.ConfigurationError:
10758
      raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
10759
                               (self.op.group_name, self.group_uuid))
10760

    
10761
    self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
10762

    
10763

    
10764
class LUGroupRename(LogicalUnit):
10765
  HPATH = "group-rename"
10766
  HTYPE = constants.HTYPE_GROUP
10767
  REQ_BGL = False
10768

    
10769
  def ExpandNames(self):
10770
    # This raises errors.OpPrereqError on its own:
10771
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10772

    
10773
    self.needed_locks = {
10774
      locking.LEVEL_NODEGROUP: [self.group_uuid],
10775
      }
10776

    
10777
  def CheckPrereq(self):
10778
    """Check prerequisites.
10779

10780
    Ensures the requested new name is not yet used.
10781

10782
    """
10783
    try:
10784
      new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
10785
    except errors.OpPrereqError:
10786
      pass
10787
    else:
10788
      raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
10789
                                 " node group (UUID: %s)" %
10790
                                 (self.op.new_name, new_name_uuid),
10791
                                 errors.ECODE_EXISTS)
10792

    
10793
  def BuildHooksEnv(self):
10794
    """Build hooks env.
10795

10796
    """
10797
    env = {
10798
      "OLD_NAME": self.op.group_name,
10799
      "NEW_NAME": self.op.new_name,
10800
      }
10801

    
10802
    mn = self.cfg.GetMasterNode()
10803
    all_nodes = self.cfg.GetAllNodesInfo()
10804
    run_nodes = [mn]
10805
    all_nodes.pop(mn, None)
10806

    
10807
    for node in all_nodes.values():
10808
      if node.group == self.group_uuid:
10809
        run_nodes.append(node.name)
10810

    
10811
    return env, run_nodes, run_nodes
10812

    
10813
  def Exec(self, feedback_fn):
10814
    """Rename the node group.
10815

10816
    """
10817
    group = self.cfg.GetNodeGroup(self.group_uuid)
10818

    
10819
    if group is None:
10820
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
10821
                               (self.op.group_name, self.group_uuid))
10822

    
10823
    group.name = self.op.new_name
10824
    self.cfg.Update(group, feedback_fn)
10825

    
10826
    return self.op.new_name
10827

    
10828

    
10829
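# Example (editor's sketch, not part of the original code): for a group
# "rack1" being renamed to "rack-a" on a cluster whose master is "node1" and
# whose nodes "node2" and "node3" belong to that group, BuildHooksEnv above
# would return roughly:
#
#   env = {"OLD_NAME": "rack1", "NEW_NAME": "rack-a"}
#   run_nodes = ["node1", "node2", "node3"]   # master plus the group members
#
# i.e. the rename hooks run on the master node and on every node of the
# renamed group; all names here are hypothetical.

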
class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    self.needed_locks = {}
    if self.op.kind == constants.TAG_NODE:
      self.op.name = _ExpandNodeName(self.cfg, self.op.name)
      self.needed_locks[locking.LEVEL_NODE] = self.op.name
    elif self.op.kind == constants.TAG_INSTANCE:
      self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name

    # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
    # not possible to acquire the BGL based on opcode parameters)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif self.op.kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif self.op.kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind), errors.ECODE_INVAL)


class LUTagsGet(TagsLU):
  """Returns the tags of a given object.

  """
  REQ_BGL = False

  def ExpandNames(self):
    TagsLU.ExpandNames(self)

    # Share locks as this is only a read operation
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    return list(self.target.GetTags())


class LUTagsSearch(NoHooksLU):
  """Searches the tags for a given pattern.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the pattern passed for validity by compiling it.

    """
    try:
      self.re = re.compile(self.op.pattern)
    except re.error, err:
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
                                 (self.op.pattern, err), errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Returns the list of (path, tag) pairs matching the pattern.

    """
    cfg = self.cfg
    tgts = [("/cluster", cfg.GetClusterInfo())]
    ilist = cfg.GetAllInstancesInfo().values()
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
    nlist = cfg.GetAllNodesInfo().values()
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
    results = []
    for path, target in tgts:
      for tag in target.GetTags():
        if self.re.search(tag):
          results.append((path, tag))
    return results


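# Example (editor's sketch): assuming a hypothetical cluster tag "env:prod"
# and an instance "web1" also tagged "env:prod", an LUTagsSearch opcode with
# pattern "^env:" would return something like:
#
#   [("/cluster", "env:prod"), ("/instances/web1", "env:prod")]
#
# The pattern is compiled in CheckPrereq, so an invalid regular expression is
# rejected before Exec ever runs.

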
class LUTagsSet(TagsLU):
  """Sets a tag on a given object.

  """
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the type and length of the tag name and value.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

  def Exec(self, feedback_fn):
    """Sets the tag.

    """
    try:
      for tag in self.op.tags:
        self.target.AddTag(tag)
    except errors.TagError, err:
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
    self.cfg.Update(self.target, feedback_fn)


class LUTagsDel(TagsLU):
  """Delete a list of tags from a given object.

  """
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()

    diff_tags = del_tags - cur_tags
    if diff_tags:
      diff_names = ("'%s'" % i for i in sorted(diff_tags))
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (utils.CommaJoin(diff_names), ),
                                 errors.ECODE_NOENT)

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    self.cfg.Update(self.target, feedback_fn)


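# Example (editor's sketch): the prerequisite check above is a plain set
# difference.  With hypothetical values
#
#   del_tags = frozenset(["a", "b"])   # tags requested for removal
#   cur_tags = set(["a", "c"])         # tags currently on the object
#
# del_tags - cur_tags evaluates to frozenset(["b"]), so the LU fails with
# "Tag(s) 'b' not found" before any tag is removed.

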
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def _TestDelay(self):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      for node, node_result in result.items():
        node_result.Raise("Failure during rpc call to node %s" % node)

  def Exec(self, feedback_fn):
    """Execute the test delay opcode, with the wanted repetitions.

    """
    if self.op.repeat == 0:
      self._TestDelay()
    else:
      top_value = self.op.repeat - 1
      for i in range(self.op.repeat):
        self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
        self._TestDelay()


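# Example (editor's sketch): the repeat handling in Exec above means that,
# for a hypothetical opcode with duration=2:
#
#   repeat=0  ->  _TestDelay() is called once (a single 2 second sleep)
#   repeat=3  ->  _TestDelay() is called three times, logging
#                 "Test delay iteration 0/2" through "2/2"
#
# so repeat=0 and repeat=1 both result in exactly one sleep.

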
class LUTestJqueue(NoHooksLU):
  """Utility LU to test some aspects of the job queue.

  """
  REQ_BGL = False

  # Must be lower than default timeout for WaitForJobChange to see whether it
  # notices changed jobs
  _CLIENT_CONNECT_TIMEOUT = 20.0
  _CLIENT_CONFIRM_TIMEOUT = 60.0

  @classmethod
  def _NotifyUsingSocket(cls, cb, errcls):
    """Opens a Unix socket and waits for another program to connect.

    @type cb: callable
    @param cb: Callback to send socket name to client
    @type errcls: class
    @param errcls: Exception class to use for errors

    """
    # Using a temporary directory as there's no easy way to create temporary
    # sockets without writing a custom loop around tempfile.mktemp and
    # socket.bind
    tmpdir = tempfile.mkdtemp()
    try:
      tmpsock = utils.PathJoin(tmpdir, "sock")

      logging.debug("Creating temporary socket at %s", tmpsock)
      sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
      try:
        sock.bind(tmpsock)
        sock.listen(1)

        # Send details to client
        cb(tmpsock)

        # Wait for client to connect before continuing
        sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
        try:
          (conn, _) = sock.accept()
        except socket.error, err:
          raise errcls("Client didn't connect in time (%s)" % err)
      finally:
        sock.close()
    finally:
      # Remove as soon as client is connected
      shutil.rmtree(tmpdir)

    # Wait for client to close
    try:
      try:
        # pylint: disable-msg=E1101
        # Instance of '_socketobject' has no ... member
        conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
        conn.recv(1)
      except socket.error, err:
        raise errcls("Client failed to confirm notification (%s)" % err)
    finally:
      conn.close()

  def _SendNotification(self, test, arg, sockname):
    """Sends a notification to the client.

    @type test: string
    @param test: Test name
    @param arg: Test argument (depends on test)
    @type sockname: string
    @param sockname: Socket path

    """
    self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))

  def _Notify(self, prereq, test, arg):
    """Notifies the client of a test.

    @type prereq: bool
    @param prereq: Whether this is a prereq-phase test
    @type test: string
    @param test: Test name
    @param arg: Test argument (depends on test)

    """
    if prereq:
      errcls = errors.OpPrereqError
    else:
      errcls = errors.OpExecError

    return self._NotifyUsingSocket(compat.partial(self._SendNotification,
                                                  test, arg),
                                   errcls)

  def CheckArguments(self):
    self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
    self.expandnames_calls = 0

  def ExpandNames(self):
    checkargs_calls = getattr(self, "checkargs_calls", 0)
    if checkargs_calls < 1:
      raise errors.ProgrammerError("CheckArguments was not called")

    self.expandnames_calls += 1

    if self.op.notify_waitlock:
      self._Notify(True, constants.JQT_EXPANDNAMES, None)

    self.LogInfo("Expanding names")

    # Get lock on master node (just to get a lock, not for a particular reason)
    self.needed_locks = {
      locking.LEVEL_NODE: self.cfg.GetMasterNode(),
      }

  def Exec(self, feedback_fn):
    if self.expandnames_calls < 1:
      raise errors.ProgrammerError("ExpandNames was not called")

    if self.op.notify_exec:
      self._Notify(False, constants.JQT_EXEC, None)

    self.LogInfo("Executing")

    if self.op.log_messages:
      self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
      for idx, msg in enumerate(self.op.log_messages):
        self.LogInfo("Sending log message %s", idx + 1)
        feedback_fn(constants.JQT_MSGPREFIX + msg)
        # Report how many test messages have been sent
        self._Notify(False, constants.JQT_LOGMSG, idx + 1)

    if self.op.fail:
      raise errors.OpExecError("Opcode failure was requested")

    return True


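# Example (editor's sketch of the client side, not part of the original
# code): _NotifyUsingSocket above expects the peer that receives the
# ELOG_JQUEUE_TEST log entry to connect to the advertised path and close the
# connection once it has processed the notification, roughly:
#
#   import socket
#   sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
#   sock.connect(sockname)   # sockname taken from the log entry
#   ...                      # client-side checks happen here
#   sock.close()             # unblocks conn.recv(1) above
#
# If the client does not connect within _CLIENT_CONNECT_TIMEOUT seconds, or
# does not close the connection within _CLIENT_CONFIRM_TIMEOUT, the error
# class passed in (OpPrereqError or OpExecError) is raised.

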
class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has four sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  # pylint: disable-msg=R0902
  # lots of instance attributes
  _ALLO_KEYS = [
    "name", "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  _RELO_KEYS = [
    "name", "relocate_from",
    ]
  _EVAC_KEYS = [
    "evac_nodes",
    ]

  def __init__(self, cfg, rpc, mode, **kwargs):
    self.cfg = cfg
    self.rpc = rpc
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    self.name = None
    self.evac_nodes = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.result = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
      fn = self._AddNewInstance
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
      fn = self._AddRelocateInstance
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      keyset = self._EVAC_KEYS
      fn = self._AddEvacuateNodes
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])

    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData(fn)

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    ninfo = cfg.GetAllNodesInfo()
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_list = [n.name for n in ninfo.values() if n.vm_capable]

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      hypervisor_name = cluster_info.enabled_hypervisors[0]

    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
                                        hypervisor_name)
    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors)

    data["nodegroups"] = self._ComputeNodeGroupData(cfg)

    config_ndata = self._ComputeBasicNodeData(ninfo)
    data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
                                                 i_list, config_ndata)
    assert len(data["nodes"]) == len(ninfo), \
        "Incomplete node data computed"

    data["instances"] = self._ComputeInstanceData(cluster_info, i_list)

    self.in_data = data

  @staticmethod
  def _ComputeNodeGroupData(cfg):
    """Compute node groups data.

    """
    ng = {}
    for guuid, gdata in cfg.GetAllNodeGroupsInfo().items():
      ng[guuid] = {
        "name": gdata.name,
        "alloc_policy": gdata.alloc_policy,
        }
    return ng

  @staticmethod
  def _ComputeBasicNodeData(node_cfg):
    """Compute global node data.

    @rtype: dict
    @returns: a dict of name: config-derived node dict

    """
    node_results = {}
    for ninfo in node_cfg.values():
      # fill in static (config-based) values
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        "group": ninfo.group,
        "master_capable": ninfo.master_capable,
        "vm_capable": ninfo.vm_capable,
        }

      node_results[ninfo.name] = pnr

    return node_results

  @staticmethod
  def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
                              node_results):
    """Compute global node data.

    @param node_results: the basic node structures as filled from the config

    """
    # make a copy of the current dict
    node_results = dict(node_results)
    for nname, nresult in node_data.items():
      assert nname in node_results, "Missing basic data for node %s" % nname
      ninfo = node_cfg[nname]

      if not (ninfo.offline or ninfo.drained):
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload

        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr_dyn.update(node_results[nname])
        node_results[nname] = pnr_dyn

    return node_results

  @staticmethod
  def _ComputeInstanceData(cluster_info, i_list):
    """Compute global instance data.

    """
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
        nic_dict = {"mac": nic.mac,
                    "ip": nic.ip,
                    "mode": filled_params[constants.NIC_MODE],
                    "link": filled_params[constants.NIC_LINK],
                   }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    return instance_data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_INT_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    return request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_MIRRORED:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    if instance.disk_template in constants.DTS_INT_MIRROR and \
        len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node", errors.ECODE_STATE)

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    return request

  def _AddEvacuateNodes(self):
    """Add evacuate nodes data to allocator structure.

    """
    request = {
      "evac_nodes": self.evac_nodes
      }
    return request

  def _BuildInputData(self, fn):
    """Build input data structures.

    """
    self._ComputeClusterData()

    request = fn()
    request["type"] = self.mode
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["result"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'result' key"
                               " is not a list")
    self.out_data = rdict


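# Example (editor's sketch, all values purely illustrative): for an
# allocation request, _BuildInputData above serializes a structure whose
# "request" member looks roughly like:
#
#   {
#     "type": "allocate",
#     "name": "inst1.example.com",
#     "disk_template": "drbd",
#     "disks": [{"size": 10240, "mode": "w"}],
#     "disk_space_total": 10368,
#     "memory": 1024,
#     "vcpus": 1,
#     "os": "debian-image",
#     "tags": [],
#     "nics": [{"mac": "auto", "ip": None, "mode": "bridged", "link": "br0"}],
#     "required_nodes": 2,
#     }
#
# alongside the cluster, node group, node and instance data computed by
# _ComputeClusterData.

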
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode
    of the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr, errors.ECODE_INVAL)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname, errors.ECODE_EXISTS)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'",
                                   errors.ECODE_INVAL)
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'",
                                   errors.ECODE_INVAL)
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
                                     " parameter", errors.ECODE_INVAL)
      if self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      fname = _ExpandInstanceName(self.cfg, self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      if not hasattr(self.op, "evac_nodes"):
        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
                                   " opcode input", errors.ECODE_INVAL)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode, errors.ECODE_INVAL)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name",
                                   errors.ECODE_INVAL)
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction, errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       evac_nodes=self.op.evac_nodes)
    else:
      raise errors.ProgrammerError("Uncaught mode %s in"
                                   " LUTestAllocator.Exec" % self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result


#: Query type implementations
_QUERY_IMPL = {
  constants.QR_INSTANCE: _InstanceQuery,
  constants.QR_NODE: _NodeQuery,
  constants.QR_GROUP: _GroupQuery,
  constants.QR_OS: _OsQuery,
  }

assert set(_QUERY_IMPL.keys()) == constants.QR_OP_QUERY


def _GetQueryImplementation(name):
  """Returns the implementation for a query type.

  @param name: Query type, must be one of L{constants.QR_OP_QUERY}

  """
  try:
    return _QUERY_IMPL[name]
  except KeyError:
    raise errors.OpPrereqError("Unknown query resource '%s'" % name,
                               errors.ECODE_INVAL)
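

# Example (editor's sketch): callers resolve a query resource to its
# implementation class before instantiating it, e.g.
#
#   impl_cls = _GetQueryImplementation(constants.QR_NODE)   # -> _NodeQuery
#
# Unknown resource names raise OpPrereqError with ECODE_INVAL, so
# user-supplied query types can be passed through without extra validation.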