#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0201,C0302

# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions

# C0302: since we have waaaay too many lines in this module

import os
import os.path
import time
import re
import platform
import logging
import copy
import OpenSSL
import socket
import tempfile
import shutil
import itertools

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf
from ganeti import uidpool
from ganeti import compat
from ganeti import masterd
from ganeti import netutils
from ganeti import query
from ganeti import qlang
from ganeti import opcodes

import ganeti.masterd.instance # pylint: disable-msg=W0611


def _SupportsOob(cfg, node):
  """Tells if node supports OOB.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @return: The OOB script if supported or an empty string otherwise

  """
  return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]


# End types
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.Log = processor.Log # pylint: disable-msg=C0103
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    # Tasklets
    self.tasklets = None

    # Validate opcode parameters and set defaults
    self.op.Validate(True)

    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.com'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()
    else:
      pass

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
277
    containing the environment that will be used for running the
278
    specific hook for this LU, a list of node names on which the hook
279
    should run before the execution, and a list of node names on which
280
    the hook should run after the execution.
281

282
    The keys of the dict must not have 'GANETI_' prefixed as this will
283
    be handled in the hooks runner. Also note additional keys will be
284
    added by the hooks runner. If the LU doesn't define any
285
    environment, an empty dict (and not None) should be returned.
286

287
    No nodes should be returned as an empty list (and not None).
288

289
    Note that if the HPATH for a LU class is None, this function will
290
    not be called.
291

292
    """
293
    raise NotImplementedError
294

    
295
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
296
    """Notify the LU about the results of its hooks.
297

298
    This method is called every time a hooks phase is executed, and notifies
299
    the Logical Unit about the hooks' result. The LU can then use it to alter
300
    its result based on the hooks.  By default the method does nothing and the
301
    previous result is passed back unchanged but any LU can define it if it
302
    wants to use the local cluster hook-scripts somehow.
303

304
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
305
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
306
    @param hook_results: the results of the multi-node hooks rpc call
307
    @param feedback_fn: function used send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # API must be kept, thus we ignore the unused-argument and
    # "could be a function" warnings
    # pylint: disable-msg=W0613,R0201
    return lu_result
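
  # Illustrative (hypothetical) override: a subclass could post-process its
  # hook results like this, e.g. to report on the POST phase:
  #
  #   def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
  #     if phase == constants.HOOKS_PHASE_POST:
  #       feedback_fn("* post-hooks ran on %d node(s)" % len(hook_results))
  #     return lu_result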

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
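
  # Illustrative usage (hypothetical LU): an instance-level LU would simply
  # call this helper from its ExpandNames implementation:
  #
  #   def ExpandNames(self):
  #     self._ExpandAndLockInstance()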

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Empty BuildHooksEnv for NoHooksLu.

    This just raises an error.

    """
    assert False, "BuildHooksEnv called for NoHooksLUs"


class Tasklet:
  """Tasklet base class.

  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
  tasklets know nothing about locks.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec

  """
  def __init__(self, lu):
    self.lu = lu

    # Shortcuts
    self.cfg = lu.cfg
    self.rpc = lu.rpc

  def CheckPrereq(self):
    """Check prerequisites for this tasklet.

    This method should check whether the prerequisites for the execution of
    this tasklet are fulfilled. It can do internode communication, but it
    should be idempotent - no cluster or system changes are allowed.

    The method should raise errors.OpPrereqError in case something is not
    fulfilled. Its return value is ignored.

    This method should also update all parameters to their canonical form if it
    hasn't been done before.

    """
    pass

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in code, or
    expected.

    """
    raise NotImplementedError


class _QueryBase:
  """Base for query utility classes.

  """
  #: Attribute holding field definitions
  FIELDS = None

  def __init__(self, filter_, fields, use_locking):
    """Initializes this class.

    """
    self.use_locking = use_locking

    self.query = query.Query(self.FIELDS, fields, filter_=filter_,
                             namefield="name")
    self.requested_data = self.query.RequestedData()
    self.names = self.query.RequestedNames()

    # Sort only if no names were requested
    self.sort_by_name = not self.names

    self.do_locking = None
    self.wanted = None

  def _GetNames(self, lu, all_names, lock_level):
    """Helper function to determine names asked for in the query.

    """
    if self.do_locking:
      names = lu.acquired_locks[lock_level]
    else:
      names = all_names

    if self.wanted == locking.ALL_SET:
      assert not self.names
      # caller didn't specify names, so ordering is not important
      return utils.NiceSort(names)

    # caller specified names and we must keep the same order
    assert self.names
    assert not self.do_locking or lu.acquired_locks[lock_level]

    missing = set(self.wanted).difference(names)
    if missing:
      raise errors.OpExecError("Some items were removed before retrieving"
                               " their data: %s" % missing)

    # Return expanded names
    return self.wanted

  @classmethod
  def FieldsQuery(cls, fields):
    """Returns list of available fields.

    @return: List of L{objects.QueryFieldDefinition}

    """
    return query.QueryFields(cls.FIELDS, fields)

  def ExpandNames(self, lu):
    """Expand names for this query.

    See L{LogicalUnit.ExpandNames}.

    """
    raise NotImplementedError()

  def DeclareLocks(self, lu, level):
    """Declare locks for this query.

    See L{LogicalUnit.DeclareLocks}.

    """
    raise NotImplementedError()

  def _GetQueryData(self, lu):
    """Collects all data for this query.

    @return: Query data object

    """
    raise NotImplementedError()

  def NewStyleQuery(self, lu):
    """Collect data and execute query.

    """
    return query.GetQueryResponse(self.query, self._GetQueryData(lu),
                                  sort_by_name=self.sort_by_name)

  def OldStyleQuery(self, lu):
    """Collect data and execute query.

    """
    return self.query.OldStyleQuery(self._GetQueryData(lu),
                                    sort_by_name=self.sort_by_name)


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.ProgrammerError: if the nodes parameter is of the wrong type

  """
  if nodes:
    return [_ExpandNodeName(lu.cfg, name) for name in nodes]

  return utils.NiceSort(lu.cfg.GetNodeList())


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is of the wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if instances:
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _GetUpdatedParams(old_params, update_dict,
                      use_default=True, use_none=False):
  """Return the new version of a parameter dictionary.

  @type old_params: dict
  @param old_params: old parameters
  @type update_dict: dict
  @param update_dict: dict containing new parameter values, or
      constants.VALUE_DEFAULT to reset the parameter to its default
      value
  @type use_default: boolean
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
      values as 'to be deleted' values
  @type use_none: boolean
  @param use_none: whether to recognise C{None} values as 'to be
      deleted' values
  @rtype: dict
  @return: the new parameter dictionary

  """
  params_copy = copy.deepcopy(old_params)
  for key, val in update_dict.iteritems():
    if ((use_default and val == constants.VALUE_DEFAULT) or
        (use_none and val is None)):
      try:
        del params_copy[key]
      except KeyError:
        pass
    else:
      params_copy[key] = val
  return params_copy
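
# Illustrative example, assuming plain dicts:
#   _GetUpdatedParams({"a": 1, "b": 2}, {"b": constants.VALUE_DEFAULT, "c": 3})
# returns {"a": 1, "c": 3}: "b" is deleted (reset to its default), "c" is
# added and "a" is kept unchanged.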


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta), errors.ECODE_INVAL)
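
# Illustrative example (hypothetical fields): with static fields ["name"] and
# dynamic fields ["load"], selecting ["name", "load"] passes, while
# ["name", "foo"] raises OpPrereqError listing the unknown field "foo".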


def _CheckGlobalHvParams(params):
  """Validates that given hypervisor params are not global ones.

  This will ensure that instances don't get customised versions of
  global params.

  """
  used_globals = constants.HVC_GLOBALS.intersection(params)
  if used_globals:
    msg = ("The following hypervisor parameters are global and cannot"
           " be customized at instance level, please modify them at"
           " cluster level: %s" % utils.CommaJoin(used_globals))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _CheckNodeOnline(lu, node, msg=None):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the node is offline

  """
  if msg is None:
    msg = "Can't use offline node"
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_STATE)


def _CheckNodeVmCapable(lu, node):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  if not lu.cfg.GetNodeInfo(node).vm_capable:
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
                               errors.ECODE_STATE)


def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node does not support the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)


def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
  """Ensure that a node has the given secondary ip.

  @type lu: L{LogicalUnit}
  @param lu: the LU on behalf of which we make the check
  @type node: string
  @param node: the node to check
  @type secondary_ip: string
  @param secondary_ip: the ip to check
  @type prereq: boolean
  @param prereq: whether to throw a prerequisite or an execute error
  @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
  @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False

  """
  result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
  result.Raise("Failure checking secondary ip on node %s" % node,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
  if not result.payload:
    msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
           " please fix and re-run this command" % secondary_ip)
    if prereq:
      raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
    else:
      raise errors.OpExecError(msg)


def _GetClusterDomainSecret():
  """Reads the cluster domain secret.

  """
  return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
                               strict=True)


def _CheckInstanceDown(lu, instance, reason):
  """Ensure that an instance is not running."""
  if instance.admin_up:
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
                               (instance.name, reason), errors.ECODE_STATE)

  pnode = instance.primary_node
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
              prereq=True, ecode=errors.ECODE_ENVIRON)

  if instance.name in ins_l.payload:
    raise errors.OpPrereqError("Instance %s is running, %s" %
                               (instance.name, reason), errors.ECODE_STATE)


def _ExpandItemName(fn, name, kind):
  """Expand an item name.

  @param fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the resolved (full) name
  @raise errors.OpPrereqError: if the item is not found

  """
  full_name = fn(name)
  if full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return full_name


def _ExpandNodeName(cfg, name):
  """Wrapper over L{_ExpandItemName} for nodes."""
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")


def _ExpandInstanceName(cfg, name):
  """Wrapper over L{_ExpandItemName} for instance."""
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env
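
# Illustrative result: for an instance with one bridged NIC and one disk, the
# returned dict contains keys such as INSTANCE_NAME, INSTANCE_PRIMARY,
# INSTANCE_NIC_COUNT, INSTANCE_NIC0_MAC, INSTANCE_NIC0_BRIDGE,
# INSTANCE_DISK_COUNT and INSTANCE_DISK0_SIZE, plus one INSTANCE_BE_* and
# INSTANCE_HV_* entry per backend/hypervisor parameter.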


def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUInstanceQueryData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  cluster = lu.cfg.GetClusterInfo()
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = cluster.SimpleFillNIC(nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': bep,
    'hvp': hvp,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max with one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should
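
# Illustrative example (hypothetical numbers): with a candidate_pool_size of
# 10 and GetMasterCandidateStats() returning mc_now=4 and mc_should=4, the
# adjusted mc_should is min(4 + 1, 10) = 5, so 4 < 5 and the node should
# promote itself.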


def _CheckNicsBridgesExist(lu, target_nics, target_node):
  """Check that the bridges needed by a list of nics exist.

  """
  cluster = lu.cfg.GetClusterInfo()
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  if not os_obj.supported_variants:
    return
  variant = objects.OS.GetVariant(name)
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """

  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]

  return []


def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
  """Check the sanity of iallocator and node arguments and use the
  cluster-wide iallocator if appropriate.

  Check that at most one of (iallocator, node) is specified. If none is
  specified, then the LU's opcode's iallocator slot is filled with the
  cluster-wide default iallocator.

  @type iallocator_slot: string
  @param iallocator_slot: the name of the opcode iallocator slot
  @type node_slot: string
  @param node_slot: the name of the opcode target node slot

  """
  node = getattr(lu.op, node_slot, None)
  iallocator = getattr(lu.op, iallocator_slot, None)

  if node is not None and iallocator is not None:
    raise errors.OpPrereqError("Do not specify both iallocator and node.",
                               errors.ECODE_INVAL)
  elif node is None and iallocator is None:
    default_iallocator = lu.cfg.GetDefaultIAllocator()
    if default_iallocator:
      setattr(lu.op, iallocator_slot, default_iallocator)
    else:
      raise errors.OpPrereqError("No iallocator or node given and no"
                                 " cluster-wide default iallocator found."
                                 " Please specify either an iallocator or a"
                                 " node, or set a cluster-wide default"
                                 " iallocator.")


class LUClusterPostInit(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    mn = self.cfg.GetMasterNode()
    return env, [], [mn]

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True


class LUClusterDestroy(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    return env, [], []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1),
                                 errors.ECODE_INVAL)
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()

    # Run post hooks on master node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
    except:
      # pylint: disable-msg=W0702
      self.LogWarning("Errors occurred running hooks on %s" % master)

    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    return master


def _VerifyCertificate(filename):
  """Verifies a certificate for LUClusterVerify.

  @type filename: string
  @param filename: Path to PEM file

  """
  try:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           utils.ReadFile(filename))
  except Exception, err: # pylint: disable-msg=W0703
    return (LUClusterVerify.ETYPE_ERROR,
            "Failed to load X509 certificate %s: %s" % (filename, err))

  (errcode, msg) = \
    utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
                                constants.SSL_CERT_EXPIRATION_ERROR)

  if msg:
    fnamemsg = "While verifying %s: %s" % (filename, msg)
  else:
    fnamemsg = None

  if errcode is None:
    return (None, fnamemsg)
  elif errcode == utils.CERT_WARNING:
    return (LUClusterVerify.ETYPE_WARNING, fnamemsg)
  elif errcode == utils.CERT_ERROR:
    return (LUClusterVerify.ETYPE_ERROR, fnamemsg)

  raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)


class LUClusterVerify(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  REQ_BGL = False

  TCLUSTER = "cluster"
  TNODE = "node"
  TINSTANCE = "instance"

  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
  EINSTANCEFAULTYDISK = (TINSTANCE, "EINSTANCEFAULTYDISK")
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
  EINSTANCESPLITGROUPS = (TINSTANCE, "EINSTANCESPLITGROUPS")
  ENODEDRBD = (TNODE, "ENODEDRBD")
  ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
  ENODEHV = (TNODE, "ENODEHV")
  ENODELVM = (TNODE, "ENODELVM")
  ENODEN1 = (TNODE, "ENODEN1")
  ENODENET = (TNODE, "ENODENET")
  ENODEOS = (TNODE, "ENODEOS")
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
  ENODERPC = (TNODE, "ENODERPC")
  ENODESSH = (TNODE, "ENODESSH")
  ENODEVERSION = (TNODE, "ENODEVERSION")
  ENODESETUP = (TNODE, "ENODESETUP")
  ENODETIME = (TNODE, "ENODETIME")
  ENODEOOBPATH = (TNODE, "ENODEOOBPATH")

  ETYPE_FIELD = "code"
  ETYPE_ERROR = "ERROR"
  ETYPE_WARNING = "WARNING"

  _HOOKS_INDENT_RE = re.compile("^", re.M)

  class NodeImage(object):
    """A class representing the logical and physical status of a node.

    @type name: string
    @ivar name: the node name to which this object refers
    @ivar volumes: a structure as returned from
        L{ganeti.backend.GetVolumeList} (runtime)
    @ivar instances: a list of running instances (runtime)
    @ivar pinst: list of configured primary instances (config)
    @ivar sinst: list of configured secondary instances (config)
    @ivar sbp: dictionary of {primary-node: list of instances} for all
        instances for which this node is secondary (config)
    @ivar mfree: free memory, as reported by hypervisor (runtime)
    @ivar dfree: free disk, as reported by the node (runtime)
    @ivar offline: the offline status (config)
    @type rpc_fail: boolean
1270
        not whether the individual keys were correct) (runtime)
1271
    @type lvm_fail: boolean
1272
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
1273
    @type hyp_fail: boolean
1274
    @ivar hyp_fail: whether the RPC call didn't return the instance list
1275
    @type ghost: boolean
1276
    @ivar ghost: whether this is a known node or not (config)
1277
    @type os_fail: boolean
1278
    @ivar os_fail: whether the RPC call didn't return valid OS data
1279
    @type oslist: list
1280
    @ivar oslist: list of OSes as diagnosed by DiagnoseOS
1281
    @type vm_capable: boolean
1282
    @ivar vm_capable: whether the node can host instances
1283

1284
    """
1285
    def __init__(self, offline=False, name=None, vm_capable=True):
1286
      self.name = name
1287
      self.volumes = {}
1288
      self.instances = []
1289
      self.pinst = []
1290
      self.sinst = []
1291
      self.sbp = {}
1292
      self.mfree = 0
1293
      self.dfree = 0
1294
      self.offline = offline
1295
      self.vm_capable = vm_capable
1296
      self.rpc_fail = False
1297
      self.lvm_fail = False
1298
      self.hyp_fail = False
1299
      self.ghost = False
1300
      self.os_fail = False
1301
      self.oslist = {}
1302

    
1303
  def ExpandNames(self):
1304
    self.needed_locks = {
1305
      locking.LEVEL_NODE: locking.ALL_SET,
1306
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1307
    }
1308
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1309

    
1310
  def _Error(self, ecode, item, msg, *args, **kwargs):
1311
    """Format an error message.
1312

1313
    Based on the opcode's error_codes parameter, either format a
1314
    parseable error code, or a simpler error string.
1315

1316
    This must be called only from Exec and functions called from Exec.
1317

1318
    """
1319
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1320
    itype, etxt = ecode
1321
    # first complete the msg
1322
    if args:
1323
      msg = msg % args
1324
    # then format the whole message
1325
    if self.op.error_codes:
1326
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1327
    else:
1328
      if item:
1329
        item = " " + item
1330
      else:
1331
        item = ""
1332
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1333
    # and finally report it via the feedback_fn
1334
    self._feedback_fn("  - %s" % msg)
1335

    
1336
  def _ErrorIf(self, cond, *args, **kwargs):
1337
    """Log an error message if the passed condition is True.
1338

1339
    """
1340
    cond = bool(cond) or self.op.debug_simulate_errors
1341
    if cond:
1342
      self._Error(*args, **kwargs)
1343
    # do not mark the operation as failed for WARN cases only
1344
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1345
      self.bad = self.bad or cond
1346

    
1347
  def _VerifyNode(self, ninfo, nresult):
1348
    """Perform some basic validation on data returned from a node.
1349

1350
      - check the result data structure is well formed and has all the
1351
        mandatory fields
1352
      - check ganeti version
1353

1354
    @type ninfo: L{objects.Node}
1355
    @param ninfo: the node to check
1356
    @param nresult: the results from the node
1357
    @rtype: boolean
1358
    @return: whether overall this call was successful (and we can expect
1359
         reasonable values in the respose)
1360

1361
    """
1362
    node = ninfo.name
1363
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1364

    
1365
    # main result, nresult should be a non-empty dict
1366
    test = not nresult or not isinstance(nresult, dict)
1367
    _ErrorIf(test, self.ENODERPC, node,
1368
                  "unable to verify node: no data returned")
1369
    if test:
1370
      return False
1371

    
1372
    # compares ganeti version
1373
    local_version = constants.PROTOCOL_VERSION
1374
    remote_version = nresult.get("version", None)
1375
    test = not (remote_version and
1376
                isinstance(remote_version, (list, tuple)) and
1377
                len(remote_version) == 2)
1378
    _ErrorIf(test, self.ENODERPC, node,
1379
             "connection to node returned invalid data")
1380
    if test:
1381
      return False
1382

    
1383
    test = local_version != remote_version[0]
1384
    _ErrorIf(test, self.ENODEVERSION, node,
1385
             "incompatible protocol versions: master %s,"
1386
             " node %s", local_version, remote_version[0])
1387
    if test:
1388
      return False
1389

    
1390
    # node seems compatible, we can actually try to look into its results
1391

    
1392
    # full package version
1393
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1394
                  self.ENODEVERSION, node,
1395
                  "software version mismatch: master %s, node %s",
1396
                  constants.RELEASE_VERSION, remote_version[1],
1397
                  code=self.ETYPE_WARNING)
1398

    
1399
    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1400
    if ninfo.vm_capable and isinstance(hyp_result, dict):
1401
      for hv_name, hv_result in hyp_result.iteritems():
1402
        test = hv_result is not None
1403
        _ErrorIf(test, self.ENODEHV, node,
1404
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1405

    
1406
    hvp_result = nresult.get(constants.NV_HVPARAMS, None)
1407
    if ninfo.vm_capable and isinstance(hvp_result, list):
1408
      for item, hv_name, hv_result in hvp_result:
1409
        _ErrorIf(True, self.ENODEHV, node,
1410
                 "hypervisor %s parameter verify failure (source %s): %s",
1411
                 hv_name, item, hv_result)
1412

    
1413
    test = nresult.get(constants.NV_NODESETUP,
1414
                           ["Missing NODESETUP results"])
1415
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1416
             "; ".join(test))
1417

    
1418
    return True
1419

    
1420
  def _VerifyNodeTime(self, ninfo, nresult,
1421
                      nvinfo_starttime, nvinfo_endtime):
1422
    """Check the node time.
1423

1424
    @type ninfo: L{objects.Node}
1425
    @param ninfo: the node to check
1426
    @param nresult: the remote results for the node
1427
    @param nvinfo_starttime: the start time of the RPC call
1428
    @param nvinfo_endtime: the end time of the RPC call
1429

1430
    """
1431
    node = ninfo.name
1432
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1433

    
1434
    ntime = nresult.get(constants.NV_TIME, None)
1435
    try:
1436
      ntime_merged = utils.MergeTime(ntime)
1437
    except (ValueError, TypeError):
1438
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
1439
      return
1440

    
1441
    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
1442
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
1443
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
1444
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
1445
    else:
1446
      ntime_diff = None
1447

    
1448
    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
1449
             "Node time diverges by at least %s from master node time",
1450
             ntime_diff)
1451

    
1452
  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
    """Check the node LVM data.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param vg_name: the configured VG name

    """
    if vg_name is None:
      return

    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # checks vg existence and size > 20G
    vglist = nresult.get(constants.NV_VGLIST, None)
    test = not vglist
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
    if not test:
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                            constants.MIN_VG_SIZE)
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)

    # check pv names
    pvlist = nresult.get(constants.NV_PVLIST, None)
    test = pvlist is None
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
    if not test:
      # check that ':' is not present in PV names, since it's a
      # special character for lvcreate (denotes the range of PEs to
      # use on the PV)
      for _, pvname, owner_vg in pvlist:
        test = ":" in pvname
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
                 " '%s' of VG '%s'", pvname, owner_vg)

  def _VerifyNodeNetwork(self, ninfo, nresult):
    """Check the node time.
1491

1492
    @type ninfo: L{objects.Node}
1493
    @param ninfo: the node to check
1494
    @param nresult: the remote results for the node
1495

1496
    """
1497
    node = ninfo.name
1498
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1499

    
1500
    test = constants.NV_NODELIST not in nresult
1501
    _ErrorIf(test, self.ENODESSH, node,
1502
             "node hasn't returned node ssh connectivity data")
1503
    if not test:
1504
      if nresult[constants.NV_NODELIST]:
1505
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
1506
          _ErrorIf(True, self.ENODESSH, node,
1507
                   "ssh communication with node '%s': %s", a_node, a_msg)
1508

    
1509
    test = constants.NV_NODENETTEST not in nresult
1510
    _ErrorIf(test, self.ENODENET, node,
1511
             "node hasn't returned node tcp connectivity data")
1512
    if not test:
1513
      if nresult[constants.NV_NODENETTEST]:
1514
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
1515
        for anode in nlist:
1516
          _ErrorIf(True, self.ENODENET, node,
1517
                   "tcp communication with node '%s': %s",
1518
                   anode, nresult[constants.NV_NODENETTEST][anode])
1519

    
1520
    test = constants.NV_MASTERIP not in nresult
1521
    _ErrorIf(test, self.ENODENET, node,
1522
             "node hasn't returned node master IP reachability data")
1523
    if not test:
1524
      if not nresult[constants.NV_MASTERIP]:
1525
        if node == self.master_node:
1526
          msg = "the master node cannot reach the master IP (not configured?)"
1527
        else:
1528
          msg = "cannot reach the master IP"
1529
        _ErrorIf(True, self.ENODENET, node, msg)
1530

    
1531
  def _VerifyInstance(self, instance, instanceconfig, node_image,
                      diskstatus):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      n_img = node_image[node]
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # ignore missing volumes on offline or broken nodes
        continue
      for volume in node_vol_should[node]:
        test = volume not in n_img.volumes
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
                 "volume %s missing on node %s", volume, node)

    if instanceconfig.admin_up:
      pri_img = node_image[node_current]
      test = instance not in pri_img.instances and not pri_img.offline
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
               "instance not running on its primary node %s",
               node_current)

    for node, n_img in node_image.items():
      if node != node_current:
        test = instance in n_img.instances
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
                 "instance should not run on node %s", node)

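    # flatten the per-node diskstatus mapping ({node: [(success, status),
    # ...]}) into (node, success, status, disk_index) tuples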
    diskdata = [(nname, success, status, idx)
                for (nname, disks) in diskstatus.items()
                for idx, (success, status) in enumerate(disks)]

    for nname, success, bdev_status, idx in diskdata:
      # the 'ghost node' construction in Exec() ensures that we have a
      # node here
      snode = node_image[nname]
      bad_snode = snode.ghost or snode.offline
      _ErrorIf(instanceconfig.admin_up and not success and not bad_snode,
               self.EINSTANCEFAULTYDISK, instance,
               "couldn't retrieve status for disk/%s on %s: %s",
               idx, nname, bdev_status)
      _ErrorIf((instanceconfig.admin_up and success and
                bdev_status.ldisk_status == constants.LDS_FAULTY),
               self.EINSTANCEFAULTYDISK, instance,
               "disk/%s on %s is faulty", idx, nname)

  def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    @type reserved: L{ganeti.utils.FieldSet}
    @param reserved: a FieldSet of reserved volume names

    """
    for node, n_img in node_image.items():
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # skip non-healthy nodes
        continue
      for volume in n_img.volumes:
        test = ((node not in node_vol_should or
                volume not in node_vol_should[node]) and
                not reserved.Matches(volume))
        self._ErrorIf(test, self.ENODEORPHANLV, node,
                      "volume %s is unknown", volume)

  def _VerifyOrphanInstances(self, instancelist, node_image):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    for node, n_img in node_image.items():
      for o_inst in n_img.instances:
        test = o_inst not in instancelist
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
                      "instance %s on node %s should not exist", o_inst, node)

  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the
    instances it was primary for.

    """
    cluster_info = self.cfg.GetClusterInfo()
    for node, n_img in node_image.items():
      # This code checks that every node which is now listed as
      # secondary has enough memory to host all instances it is
      # supposed to should a single other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well
      # as up ones, considering that even if they're down someone
      # might want to start them even in the event of a node failure.
      if n_img.offline:
        # we're skipping offline nodes from the N+1 warning, since
        # most likely we don't have good memory information from them;
        # we already list instances living on such nodes, and that's
        # enough warning
        continue
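      # n_img.sbp maps each primary node to the list of instances that
      # use this node as secondary, i.e. those that would fail over here
      # if that primary node died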
      for prinode, instances in n_img.sbp.items():
        needed_mem = 0
        for instance in instances:
          bep = cluster_info.FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        test = n_img.mfree < needed_mem
        self._ErrorIf(test, self.ENODEN1, node,
                      "not enough memory to accommodate instance failovers"
                      " should node %s fail", prinode)

  def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
                       master_files):
    """Verifies and computes the node required file checksums.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param master_files: list of files that only masters should have

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    remote_cksum = nresult.get(constants.NV_FILELIST, None)
    test = not isinstance(remote_cksum, dict)
    _ErrorIf(test, self.ENODEFILECHECK, node,
             "node hasn't returned file checksum data")
    if test:
      return

    for file_name in file_list:
      node_is_mc = ninfo.master_candidate
      must_have = (file_name not in master_files) or node_is_mc
      # missing
      test1 = file_name not in remote_cksum
      # invalid checksum
      test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
      # existing and good
      test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
      _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
               "file '%s' missing", file_name)
      _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
               "file '%s' has wrong checksum", file_name)
      # not candidate and this is not a must-have file
      _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
               "file '%s' should not exist on non master"
               " candidates (and the file is outdated)", file_name)
      # all good, except non-master/non-must have combination
      _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
               "file '%s' should not exist"
               " on non master candidates", file_name)

  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
                      drbd_map):
    """Verifies the node DRBD status.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param instanceinfo: the dict of instances
    @param drbd_helper: the configured DRBD usermode helper
    @param drbd_map: the DRBD map as returned by
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    if drbd_helper:
      helper_result = nresult.get(constants.NV_DRBDHELPER, None)
      test = (helper_result is None)
      _ErrorIf(test, self.ENODEDRBDHELPER, node,
               "no drbd usermode helper returned")
      if helper_result:
        status, payload = helper_result
        test = not status
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
                 "drbd usermode helper check unsuccessful: %s", payload)
        test = status and (payload != drbd_helper)
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
                 "wrong drbd usermode helper: %s", payload)

    # compute the DRBD minors
    node_drbd = {}
    for minor, instance in drbd_map[node].items():
      test = instance not in instanceinfo
      _ErrorIf(test, self.ECLUSTERCFG, None,
               "ghost instance '%s' in temporary DRBD map", instance)
      # ghost instance should not be running, but otherwise we
      # don't give double warnings (both ghost instance and
      # unallocated minor in use)
      if test:
        node_drbd[minor] = (instance, False)
      else:
        instance = instanceinfo[instance]
        node_drbd[minor] = (instance.name, instance.admin_up)
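
    # node_drbd now maps each DRBD minor expected on this node to
    # (instance name, whether the instance should be running); a minor
    # whose instance is down may legitimately be inactive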
    # and now check them
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
    test = not isinstance(used_minors, (tuple, list))
    _ErrorIf(test, self.ENODEDRBD, node,
             "cannot parse drbd status file: %s", str(used_minors))
    if test:
      # we cannot check drbd status
      return

    for minor, (iname, must_exist) in node_drbd.items():
      test = minor not in used_minors and must_exist
      _ErrorIf(test, self.ENODEDRBD, node,
               "drbd minor %d of instance %s is not active", minor, iname)
    for minor in used_minors:
      test = minor not in node_drbd
      _ErrorIf(test, self.ENODEDRBD, node,
               "unallocated drbd minor %d is in use", minor)

  def _UpdateNodeOS(self, ninfo, nresult, nimg):
    """Builds the node OS structures.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    remote_os = nresult.get(constants.NV_OSLIST, None)
    test = (not isinstance(remote_os, list) or
            not compat.all(isinstance(v, list) and len(v) == 7
                           for v in remote_os))

    _ErrorIf(test, self.ENODEOS, node,
             "node hasn't returned valid OS data")

    nimg.os_fail = test

    if test:
      return
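
    # group the OS data by name; each list entry mirrors one NV_OSLIST
    # row: (path, status, diagnose, variants, parameters, api_versions)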
    os_dict = {}

    for (name, os_path, status, diagnose,
         variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:

      if name not in os_dict:
        os_dict[name] = []

      # parameters is a list of lists instead of list of tuples due to
      # JSON lacking a real tuple type, fix it:
      parameters = [tuple(v) for v in parameters]
      os_dict[name].append((os_path, status, diagnose,
                            set(variants), set(parameters), set(api_ver)))

    nimg.oslist = os_dict

  def _VerifyNodeOS(self, ninfo, nimg, base):
    """Verifies the node OS list.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nimg: the node image object
    @param base: the 'template' node we match against (e.g. from the master)

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"

    for os_name, os_data in nimg.oslist.items():
      assert os_data, "Empty OS status for OS %s?!" % os_name
      f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
      _ErrorIf(not f_status, self.ENODEOS, node,
               "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
      _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
               "OS '%s' has multiple entries (first one shadows the rest): %s",
               os_name, utils.CommaJoin([v[0] for v in os_data]))
      # this will be caught in the backend too
      _ErrorIf(compat.any(v >= constants.OS_API_V15 for v in f_api)
               and not f_var, self.ENODEOS, node,
               "OS %s with API at least %d does not declare any variant",
               os_name, constants.OS_API_V15)
      # comparisons with the 'base' image
      test = os_name not in base.oslist
      _ErrorIf(test, self.ENODEOS, node,
               "Extra OS %s not present on reference node (%s)",
               os_name, base.name)
      if test:
        continue
      assert base.oslist[os_name], "Base node has empty OS status?"
      _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
      if not b_status:
        # base OS is invalid, skipping
        continue
      for kind, a, b in [("API version", f_api, b_api),
                         ("variants list", f_var, b_var),
                         ("parameters", f_param, b_param)]:
        _ErrorIf(a != b, self.ENODEOS, node,
                 "OS %s %s differs from reference node %s: %s vs. %s",
                 kind, os_name, base.name,
                 utils.CommaJoin(a), utils.CommaJoin(b))

    # check any missing OSes
    missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
    _ErrorIf(missing, self.ENODEOS, node,
             "OSes present on reference node %s but missing on this node: %s",
             base.name, utils.CommaJoin(missing))

  def _VerifyOob(self, ninfo, nresult):
    """Verifies out of band functionality of a node.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node

    """
    node = ninfo.name
    # We just have to verify the paths on master and/or master candidates
    # as the oob helper is invoked on the master
    if ((ninfo.master_candidate or ninfo.master_capable) and
        constants.NV_OOB_PATHS in nresult):
      for path_result in nresult[constants.NV_OOB_PATHS]:
        self._ErrorIf(path_result, self.ENODEOOBPATH, node, path_result)

  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
    """Verifies and updates the node volume data.

    This function will update a L{NodeImage}'s internal structures
    with data from the remote call.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object
    @param vg_name: the configured VG name

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    nimg.lvm_fail = True
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
    if vg_name is None:
      pass
    elif isinstance(lvdata, basestring):
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
               utils.SafeEncode(lvdata))
    elif not isinstance(lvdata, dict):
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
    else:
      nimg.volumes = lvdata
      nimg.lvm_fail = False

  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
    """Verifies and updates the node instance list.

    If the listing was successful, then updates this node's instance
    list. Otherwise, it marks the RPC call as failed for the instance
    list key.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object

    """
    idata = nresult.get(constants.NV_INSTANCELIST, None)
    test = not isinstance(idata, list)
    self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
                  " (instancelist): %s", utils.SafeEncode(str(idata)))
    if test:
      nimg.hyp_fail = True
    else:
      nimg.instances = idata

  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
    """Verifies and computes a node information map.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object
    @param vg_name: the configured VG name

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # try to read free memory (from the hypervisor)
    hv_info = nresult.get(constants.NV_HVINFO, None)
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
    _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
    if not test:
      try:
        nimg.mfree = int(hv_info["memory_free"])
      except (ValueError, TypeError):
        _ErrorIf(True, self.ENODERPC, node,
                 "node returned invalid nodeinfo, check hypervisor")

    # FIXME: devise a free space model for file based instances as well
    if vg_name is not None:
      test = (constants.NV_VGLIST not in nresult or
              vg_name not in nresult[constants.NV_VGLIST])
      _ErrorIf(test, self.ENODELVM, node,
               "node didn't return data for the volume group '%s'"
               " - it is either missing or broken", vg_name)
      if not test:
        try:
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
        except (ValueError, TypeError):
          _ErrorIf(True, self.ENODERPC, node,
                   "node returned invalid LVM info, check LVM status")

  def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
    """Gets per-disk status information for all instances.

    @type nodelist: list of strings
    @param nodelist: Node names
    @type node_image: dict of (name, L{objects.Node})
    @param node_image: Node objects
    @type instanceinfo: dict of (name, L{objects.Instance})
    @param instanceinfo: Instance objects
    @rtype: {instance: {node: [(success, payload)]}}
    @return: a dictionary of per-instance dictionaries with nodes as
        keys and disk information as values; the disk information is a
        list of tuples (success, payload)

    """
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    node_disks = {}
    node_disks_devonly = {}
    diskless_instances = set()
    diskless = constants.DT_DISKLESS

    for nname in nodelist:
      node_instances = list(itertools.chain(node_image[nname].pinst,
                                            node_image[nname].sinst))
      diskless_instances.update(inst for inst in node_instances
                                if instanceinfo[inst].disk_template == diskless)
      disks = [(inst, disk)
               for inst in node_instances
               for disk in instanceinfo[inst].disks]

      if not disks:
        # No need to collect data
        continue

      node_disks[nname] = disks

      # Creating copies as SetDiskID below will modify the objects and that can
      # lead to incorrect data returned from nodes
      devonly = [dev.Copy() for (_, dev) in disks]

      for dev in devonly:
        self.cfg.SetDiskID(dev, nname)

      node_disks_devonly[nname] = devonly

    assert len(node_disks) == len(node_disks_devonly)

    # Collect data from all nodes with disks
    result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
                                                          node_disks_devonly)

    assert len(result) == len(node_disks)
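
    # accumulate the per-disk results into
    # {instance: {node: [(success, payload), ...]}}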
    instdisk = {}

    for (nname, nres) in result.items():
      disks = node_disks[nname]

      if nres.offline:
        # No data from this node
        data = len(disks) * [(False, "node offline")]
      else:
        msg = nres.fail_msg
        _ErrorIf(msg, self.ENODERPC, nname,
                 "while getting disk information: %s", msg)
        if msg:
          # No data from this node
          data = len(disks) * [(False, msg)]
        else:
          data = []
          for idx, i in enumerate(nres.payload):
            if isinstance(i, (tuple, list)) and len(i) == 2:
              data.append(i)
            else:
              logging.warning("Invalid result from node %s, entry %d: %s",
                              nname, idx, i)
              data.append((False, "Invalid result from the remote node"))

      for ((inst, _), status) in zip(disks, data):
        instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)

    # Add empty entries for diskless instances.
    for inst in diskless_instances:
      assert inst not in instdisk
      instdisk[inst] = {}

    assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
                      len(nnames) <= len(instanceinfo[inst].all_nodes) and
                      compat.all(isinstance(s, (tuple, list)) and
                                 len(s) == 2 for s in statuses)
                      for inst, nnames in instdisk.items()
                      for nname, statuses in nnames.items())
    assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"

    return instdisk

  def _VerifyHVP(self, hvp_data):
    """Verifies locally the syntax of the hypervisor parameters.

    """
    for item, hv_name, hv_params in hvp_data:
      msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
             (hv_name, item))
      try:
        hv_class = hypervisor.GetHypervisor(hv_name)
        utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
        hv_class.CheckParameterSyntax(hv_params)
      except errors.GenericError, err:
        self._ErrorIf(True, self.ECLUSTERCFG, None, msg % str(err))


  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just ran in the post phase and their failure
    causes the output to be logged in the verify output and the
    verification to fail.

    """
    all_nodes = self.cfg.GetNodeList()
    env = {
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
      }
    for node in self.cfg.GetAllNodesInfo().values():
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())

    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    # This method has too many local variables. pylint: disable-msg=R0914
    self.bad = False
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
    verbose = self.op.verbose
    self._feedback_fn = feedback_fn
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)

    # Check the cluster certificates
    for cert_filename in constants.ALL_CERT_FILES:
      (errcode, msg) = _VerifyCertificate(cert_filename)
      _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)

    vg_name = self.cfg.GetVGName()
    drbd_helper = self.cfg.GetDRBDHelper()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    cluster = self.cfg.GetClusterInfo()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    nodeinfo_byname = dict(zip(nodelist, nodeinfo))
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
                        for iname in instancelist)
    groupinfo = self.cfg.GetAllNodeGroupsInfo()
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    n_offline = 0 # Count of offline nodes
    n_drained = 0 # Count of nodes being drained
    node_vol_should = {}

    # FIXME: verify OS list
    # do local checksums
    master_files = [constants.CLUSTER_CONF_FILE]
    master_node = self.master_node = self.cfg.GetMasterNode()
    master_ip = self.cfg.GetMasterIP()

    file_names = ssconf.SimpleStore().GetFileList()
    file_names.extend(constants.ALL_CERT_FILES)
    file_names.extend(master_files)
    if cluster.modify_etc_hosts:
      file_names.append(constants.ETC_HOSTS)

    local_checksums = utils.FingerprintFiles(file_names)

    # Compute the set of hypervisor parameters
    hvp_data = []
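    # each entry is a (source description, hypervisor name, parameters)
    # triple; they are all checked locally by _VerifyHVP below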
    for hv_name in hypervisors:
      hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
    for os_name, os_hvp in cluster.os_hvp.items():
      for hv_name, hv_params in os_hvp.items():
        if not hv_params:
          continue
        full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
        hvp_data.append(("os %s" % os_name, hv_name, full_params))
    # TODO: collapse identical parameter values in a single one
    for instance in instanceinfo.values():
      if not instance.hvparams:
        continue
      hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
                       cluster.FillHV(instance)))
    # and verify them locally
    self._VerifyHVP(hvp_data)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
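    # map of the NV_* checks to run on each node, with their arguments;
    # the per-node results come back keyed by the same constants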
    node_verify_param = {
      constants.NV_FILELIST: file_names,
      constants.NV_NODELIST: [node.name for node in nodeinfo
                              if not node.offline],
      constants.NV_HYPERVISOR: hypervisors,
      constants.NV_HVPARAMS: hvp_data,
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
                                  node.secondary_ip) for node in nodeinfo
                                 if not node.offline],
      constants.NV_INSTANCELIST: hypervisors,
      constants.NV_VERSION: None,
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
      constants.NV_NODESETUP: None,
      constants.NV_TIME: None,
      constants.NV_MASTERIP: (master_node, master_ip),
      constants.NV_OSLIST: None,
      constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
      }

    if vg_name is not None:
      node_verify_param[constants.NV_VGLIST] = None
      node_verify_param[constants.NV_LVLIST] = vg_name
      node_verify_param[constants.NV_PVLIST] = [vg_name]
      node_verify_param[constants.NV_DRBDLIST] = None

    if drbd_helper:
      node_verify_param[constants.NV_DRBDHELPER] = drbd_helper

    # Build our expected cluster state
    node_image = dict((node.name, self.NodeImage(offline=node.offline,
                                                 name=node.name,
                                                 vm_capable=node.vm_capable))
                      for node in nodeinfo)

    # Gather OOB paths
    oob_paths = []
    for node in nodeinfo:
      path = _SupportsOob(self.cfg, node)
      if path and path not in oob_paths:
        oob_paths.append(path)

    if oob_paths:
      node_verify_param[constants.NV_OOB_PATHS] = oob_paths

    for instance in instancelist:
      inst_config = instanceinfo[instance]

      for nname in inst_config.all_nodes:
        if nname not in node_image:
          # ghost node
          gnode = self.NodeImage(name=nname)
          gnode.ghost = True
          node_image[nname] = gnode

      inst_config.MapLVsByNode(node_vol_should)

      pnode = inst_config.primary_node
      node_image[pnode].pinst.append(instance)

      for snode in inst_config.secondary_nodes:
        nimg = node_image[snode]
        nimg.sinst.append(instance)
        if pnode not in nimg.sbp:
          nimg.sbp[pnode] = []
        nimg.sbp[pnode].append(instance)

    # At this point, we have the in-memory data structures complete,
    # except for the runtime information, which we'll gather next

    # Due to the way our RPC system works, exact response times cannot be
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
    # time before and after executing the request, we can at least have a time
    # window.
    nvinfo_starttime = time.time()
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())
    nvinfo_endtime = time.time()

    all_drbd_map = self.cfg.ComputeDRBDMap()

    feedback_fn("* Gathering disk information (%s nodes)" % len(nodelist))
    instdisk = self._CollectDiskInfo(nodelist, node_image, instanceinfo)

    feedback_fn("* Verifying node status")

    refos_img = None

    for node_i in nodeinfo:
      node = node_i.name
      nimg = node_image[node]

      if node_i.offline:
        if verbose:
          feedback_fn("* Skipping offline node %s" % (node,))
        n_offline += 1
        continue

      if node == master_node:
        ntype = "master"
      elif node_i.master_candidate:
        ntype = "master candidate"
      elif node_i.drained:
        ntype = "drained"
        n_drained += 1
      else:
        ntype = "regular"
      if verbose:
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))

      msg = all_nvinfo[node].fail_msg
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
      if msg:
        nimg.rpc_fail = True
        continue

      nresult = all_nvinfo[node].payload

      nimg.call_ok = self._VerifyNode(node_i, nresult)
      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
      self._VerifyNodeNetwork(node_i, nresult)
      self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
                            master_files)

      self._VerifyOob(node_i, nresult)

      if nimg.vm_capable:
        self._VerifyNodeLVM(node_i, nresult, vg_name)
        self._VerifyNodeDrbd(node_i, nresult, instanceinfo, drbd_helper,
                             all_drbd_map)

        self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
        self._UpdateNodeInstances(node_i, nresult, nimg)
        self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
        self._UpdateNodeOS(node_i, nresult, nimg)
        if not nimg.os_fail:
          if refos_img is None:
            refos_img = nimg
          self._VerifyNodeOS(node_i, nimg, refos_img)

    feedback_fn("* Verifying instance status")
    for instance in instancelist:
      if verbose:
        feedback_fn("* Verifying instance %s" % instance)
      inst_config = instanceinfo[instance]
      self._VerifyInstance(instance, inst_config, node_image,
                           instdisk[instance])
      inst_nodes_offline = []

      pnode = inst_config.primary_node
      pnode_img = node_image[pnode]
      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
               self.ENODERPC, pnode, "instance %s, connection to"
               " primary node failed", instance)

      _ErrorIf(pnode_img.offline, self.EINSTANCEBADNODE, instance,
               "instance lives on offline node %s", inst_config.primary_node)

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if not inst_config.secondary_nodes:
        i_non_redundant.append(instance)

      _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
               instance, "instance has multiple secondary nodes: %s",
               utils.CommaJoin(inst_config.secondary_nodes),
               code=self.ETYPE_WARNING)

      if inst_config.disk_template in constants.DTS_NET_MIRROR:
        pnode = inst_config.primary_node
        instance_nodes = utils.NiceSort(inst_config.all_nodes)
        instance_groups = {}

        for node in instance_nodes:
          instance_groups.setdefault(nodeinfo_byname[node].group,
                                     []).append(node)

        pretty_list = [
          "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
          # Sort so that we always list the primary node first.
          for group, nodes in sorted(instance_groups.items(),
                                     key=lambda (_, nodes): pnode in nodes,
                                     reverse=True)]

        self._ErrorIf(len(instance_groups) > 1, self.EINSTANCESPLITGROUPS,
                      instance, "instance has primary and secondary nodes in"
                      " different groups: %s", utils.CommaJoin(pretty_list),
                      code=self.ETYPE_WARNING)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        s_img = node_image[snode]
        _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
                 "instance %s, connection to secondary node failed", instance)

        if s_img.offline:
          inst_nodes_offline.append(snode)

      # warn that the instance lives on offline nodes
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
               "instance has offline secondary node(s) %s",
               utils.CommaJoin(inst_nodes_offline))
      # ... or ghost/non-vm_capable nodes
      for node in inst_config.all_nodes:
        _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
                 "instance lives on ghost node %s", node)
        _ErrorIf(not node_image[node].vm_capable, self.EINSTANCEBADNODE,
                 instance, "instance lives on non-vm_capable node %s", node)

    feedback_fn("* Verifying orphan volumes")
    reserved = utils.FieldSet(*cluster.reserved_lvs)
    self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)

    feedback_fn("* Verifying orphan instances")
    self._VerifyOrphanInstances(instancelist, node_image)

    if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
      feedback_fn("* Verifying N+1 Memory redundancy")
      self._VerifyNPlusOneMemory(node_image, instanceinfo)

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    if n_offline:
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)

    if n_drained:
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)

    return not self.bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result.

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      feedback_fn("* Hooks Results")
      assert hooks_results, "invalid result from hooks"

      for node_name in hooks_results:
        res = hooks_results[node_name]
        msg = res.fail_msg
        test = msg and not res.offline
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
                      "Communication failure in hooks execution: %s", msg)
        if res.offline or msg:
          # No need to investigate payload if node is offline or gave an error.
          # manually override lu_result here, as _ErrorIf only
          # overrides self.bad
          lu_result = 1
          continue
        for script, hkr, output in res.payload:
          test = hkr == constants.HKR_FAIL
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
                        "Script %s failed, output:", script)
          if test:
            output = self._HOOKS_INDENT_RE.sub('      ', output)
            feedback_fn("%s" % output)
            lu_result = 0

      return lu_result


class LUClusterVerifyDisks(NoHooksLU):
  """Verifies the cluster disk status.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes)

    """
    result = res_nodes, res_instances, res_missing = {}, [], {}

    nodes = utils.NiceSort(self.cfg.GetVmCapableNodeList())
    instances = self.cfg.GetAllInstancesInfo().values()

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if not inst.admin_up:
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_lv_list(nodes, [])
    for node, node_res in node_lvs.items():
      if node_res.offline:
        continue
      msg = node_res.fail_msg
      if msg:
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
        res_nodes[node] = msg
        continue

      lvs = node_res.payload
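      # the payload maps LV names to attribute tuples; the third field
      # tells whether the LV is online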
2495
      for lv_name, (_, _, lv_online) in lvs.items():
2496
        inst = nv_dict.pop((node, lv_name), None)
2497
        if (not lv_online and inst is not None
2498
            and inst.name not in res_instances):
2499
          res_instances.append(inst.name)
2500

    
2501
    # any leftover items in nv_dict are missing LVs, let's arrange the
2502
    # data better
2503
    for key, inst in nv_dict.iteritems():
2504
      if inst.name not in res_missing:
2505
        res_missing[inst.name] = []
2506
      res_missing[inst.name].append(key)
2507

    
2508
    return result
2509

    
2510

    
2511
class LUClusterRepairDiskSizes(NoHooksLU):
  """Verifies the cluster disk sizes.

  """
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = _ExpandInstanceName(self.cfg, name)
        self.wanted_names.append(full_name)
      self.needed_locks = {
        locking.LEVEL_NODE: [],
        locking.LEVEL_INSTANCE: self.wanted_names,
        }
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    else:
      self.wanted_names = None
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: locking.ALL_SET,
        }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
      self._LockInstancesNodes(primary_only=True)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]

  def _EnsureChildSizes(self, disk):
    """Ensure children of the disk have the needed disk size.

    This is valid mainly for DRBD8 and fixes an issue where the
    children have smaller disk size.

    @param disk: an L{ganeti.objects.Disk} object

    """
    if disk.dev_type == constants.LD_DRBD8:
      assert disk.children, "Empty children for DRBD8?"
      fchild = disk.children[0]
      mismatch = fchild.size < disk.size
      if mismatch:
        self.LogInfo("Child disk has size %d, parent %d, fixing",
                     fchild.size, disk.size)
        fchild.size = disk.size

      # and we recurse on this child only, not on the metadev
      return self._EnsureChildSizes(fchild) or mismatch
    else:
      return False

  def Exec(self, feedback_fn):
    """Verify the size of cluster disks.

    """
    # TODO: check child disks too
    # TODO: check differences in size between primary/secondary nodes
    per_node_disks = {}
    for instance in self.wanted_instances:
      pnode = instance.primary_node
      if pnode not in per_node_disks:
        per_node_disks[pnode] = []
      for idx, disk in enumerate(instance.disks):
        per_node_disks[pnode].append((instance, idx, disk))

    changed = []
    for node, dskl in per_node_disks.items():
      newl = [v[2].Copy() for v in dskl]
      for dsk in newl:
        self.cfg.SetDiskID(dsk, node)
      result = self.rpc.call_blockdev_getsize(node, newl)
      if result.fail_msg:
        self.LogWarning("Failure in blockdev_getsize call to node"
                        " %s, ignoring", node)
        continue
      if len(result.payload) != len(dskl):
        logging.warning("Invalid result from node %s: len(dskl)=%d,"
                        " result.payload=%s", node, len(dskl), result.payload)
        self.LogWarning("Invalid result from node %s, ignoring node results",
                        node)
        continue
      for ((instance, idx, disk), size) in zip(dskl, result.payload):
        if size is None:
          self.LogWarning("Disk %d of instance %s did not return size"
                          " information, ignoring", idx, instance.name)
          continue
        if not isinstance(size, (int, long)):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " size information, ignoring", idx, instance.name)
          continue
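        # blockdev_getsize returns bytes while the configuration stores
        # MiB, so convert before comparing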
        size = size >> 20
        if size != disk.size:
          self.LogInfo("Disk %d of instance %s has mismatched size,"
                       " correcting: recorded %d, actual %d", idx,
                       instance.name, disk.size, size)
          disk.size = size
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, size))
        if self._EnsureChildSizes(disk):
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, disk.size))
    return changed


class LUClusterRename(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    all_nodes = self.cfg.GetNodeList()
    return env, [mn], all_nodes

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = netutils.GetHostname(name=self.op.name,
                                    family=self.cfg.GetPrimaryIPFamily())

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed",
                                 errors.ECODE_INVAL)
    if new_ip != old_ip:
      if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network" %
                                   new_ip, errors.ECODE_NOTUNIQUE)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster, feedback_fn)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetOnlineNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
    finally:
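      # always try to restart the master role, even if the rename or the
      # known-hosts distribution above failed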
      result = self.rpc.call_node_start_master(master, False, False)
      msg = result.fail_msg
      if msg:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually: %s", msg)

    return clustername


class LUClusterSetParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters.

    """
    if self.op.uid_pool:
      uidpool.CheckUidPool(self.op.uid_pool)

    if self.op.add_uids:
      uidpool.CheckUidPool(self.op.add_uids)

    if self.op.remove_uids:
      uidpool.CheckUidPool(self.op.remove_uids)

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    if self.op.vg_name is not None and not self.op.vg_name:
      if self.cfg.HasAnyDiskOfType(constants.LD_LV):
        raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
                                   " instances exist", errors.ECODE_INVAL)

    if self.op.drbd_helper is not None and not self.op.drbd_helper:
      if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
        raise errors.OpPrereqError("Cannot disable drbd helper while"
                                   " drbd-based instances exist",
                                   errors.ECODE_INVAL)

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        msg = vglist[node].fail_msg
        if msg:
          # ignoring down node
          self.LogWarning("Error while gathering data on node %s"
                          " (ignoring node): %s", node, msg)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus), errors.ECODE_ENVIRON)

    if self.op.drbd_helper:
      # checks given drbd helper on all nodes
      helpers = self.rpc.call_drbd_helper(node_list)
      for node in node_list:
        ninfo = self.cfg.GetNodeInfo(node)
        if ninfo.offline:
          self.LogInfo("Not checking drbd helper on offline node %s", node)
          continue
        msg = helpers[node].fail_msg
        if msg:
          raise errors.OpPrereqError("Error checking drbd helper on node"
                                     " '%s': %s" % (node, msg),
                                     errors.ECODE_ENVIRON)
        node_helper = helpers[node].payload
        if node_helper != self.op.drbd_helper:
          raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
                                     (node, node_helper), errors.ECODE_ENVIRON)
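
    # The parameter updates below all start from a copy of the current
    # cluster-level values and overlay the requested changes, so anything
    # not mentioned in the request keeps its present setting.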
2800

    
2801
    self.cluster = cluster = self.cfg.GetClusterInfo()
2802
    # validate params changes
2803
    if self.op.beparams:
2804
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
2805
      self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
2806

    
2807
    if self.op.ndparams:
2808
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
2809
      self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
2810

    
2811
    if self.op.nicparams:
2812
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
2813
      self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
2814
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
2815
      nic_errors = []
2816

    
2817
      # check all instances for consistency
      for instance in self.cfg.GetAllInstancesInfo().values():
        for nic_idx, nic in enumerate(instance.nics):
          params_copy = copy.deepcopy(nic.nicparams)
          params_filled = objects.FillDict(self.new_nicparams, params_copy)

          # check parameter syntax
          try:
            objects.NIC.CheckParameterSyntax(params_filled)
          except errors.ConfigurationError, err:
            nic_errors.append("Instance %s, nic/%d: %s" %
                              (instance.name, nic_idx, err))

          # if we're moving instances to routed, check that they have an ip
          target_mode = params_filled[constants.NIC_MODE]
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
            nic_errors.append("Instance %s, nic/%d: routed nic with no ip" %
                              (instance.name, nic_idx))
      if nic_errors:
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
                                   "\n".join(nic_errors))

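    # The hvparams merge below is per-hypervisor: e.g. (hypothetical values)
    # an op of {"xen-pvm": {"root_path": "/dev/xvda1"}} updates only that key
    # in the existing xen-pvm dict and leaves other hypervisors untouched.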
    # hypervisor list/parameters
    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    # os hypervisor parameters
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
    if self.op.os_hvp:
      for os_name, hvs in self.op.os_hvp.items():
        if os_name not in self.new_os_hvp:
          self.new_os_hvp[os_name] = hvs
        else:
          for hv_name, hv_dict in hvs.items():
            if hv_name not in self.new_os_hvp[os_name]:
              self.new_os_hvp[os_name][hv_name] = hv_dict
            else:
              self.new_os_hvp[os_name][hv_name].update(hv_dict)

    # os parameters
    self.new_osp = objects.FillDict(cluster.osparams, {})
    if self.op.osparams:
      for os_name, osp in self.op.osparams.items():
        if os_name not in self.new_osp:
          self.new_osp[os_name] = {}

        self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
                                                  use_none=True)

        if not self.new_osp[os_name]:
          # we removed all parameters
          del self.new_osp[os_name]
        else:
          # check the parameter validity (remote check)
          _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
                         os_name, self.new_osp[os_name])

    # changes to the hypervisor list
    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
      for hv in self.hv_list:
        # if the hypervisor doesn't already exist in the cluster
        # hvparams, we initialize it to empty, and then (in both
        # cases) we make sure to fill the defaults, as we might not
        # have a complete defaults list if the hypervisor wasn't
        # enabled before
        if hv not in new_hvp:
          new_hvp[hv] = {}
        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

    if self.op.os_hvp:
      # no need to check any newly-enabled hypervisors, since the
      # defaults have already been checked in the above code-block
      for os_name, os_hvp in self.new_os_hvp.items():
        for hv_name, hv_params in os_hvp.items():
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          # we need to fill in the new os_hvp on top of the actual hv_params
          cluster_defaults = self.new_hvparams.get(hv_name, {})
          new_osp = objects.FillDict(cluster_defaults, hv_params)
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(new_osp)
          _CheckHVParams(self, node_list, hv_name, new_osp)

    if self.op.default_iallocator:
      alloc_script = utils.FindFile(self.op.default_iallocator,
                                    constants.IALLOCATOR_SEARCH_PATH,
                                    os.path.isfile)
      if alloc_script is None:
        raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                   " specified" % self.op.default_iallocator,
                                   errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      new_volume = self.op.vg_name
      if not new_volume:
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.drbd_helper is not None:
      new_helper = self.op.drbd_helper
      if not new_helper:
        new_helper = None
      if new_helper != self.cfg.GetDRBDHelper():
        self.cfg.SetDRBDHelper(new_helper)
      else:
        feedback_fn("Cluster DRBD helper already in desired state,"
                    " not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.os_hvp:
      self.cluster.os_hvp = self.new_os_hvp
    if self.op.enabled_hypervisors is not None:
      self.cluster.hvparams = self.new_hvparams
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
    if self.op.nicparams:
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
    if self.op.osparams:
      self.cluster.osparams = self.new_osp
    if self.op.ndparams:
      self.cluster.ndparams = self.new_ndparams

    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
      # we need to update the pool size here, otherwise the save will fail
      _AdjustCandidatePool(self, [])

    if self.op.maintain_node_health is not None:
      self.cluster.maintain_node_health = self.op.maintain_node_health

    if self.op.prealloc_wipe_disks is not None:
      self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks

    if self.op.add_uids is not None:
      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)

    if self.op.remove_uids is not None:
      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)

    if self.op.uid_pool is not None:
      self.cluster.uid_pool = self.op.uid_pool

    if self.op.default_iallocator is not None:
      self.cluster.default_iallocator = self.op.default_iallocator

    if self.op.reserved_lvs is not None:
      self.cluster.reserved_lvs = self.op.reserved_lvs

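    # helper_os below applies a list of (modification, os_name) pairs, e.g.
    # (hypothetical values) [(constants.DDM_ADD, "debian-image")] appends to
    # the hidden or blacklisted OS list; duplicates are only reported via
    # feedback_fn, never treated as errors.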
    def helper_os(aname, mods, desc):
      desc += " OS list"
      lst = getattr(self.cluster, aname)
      for key, val in mods:
        if key == constants.DDM_ADD:
          if val in lst:
            feedback_fn("OS %s already in %s, ignoring" % (val, desc))
          else:
            lst.append(val)
        elif key == constants.DDM_REMOVE:
          if val in lst:
            lst.remove(val)
          else:
            feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
        else:
          raise errors.ProgrammerError("Invalid modification '%s'" % key)

    if self.op.hidden_os:
      helper_os("hidden_os", self.op.hidden_os, "hidden")

    if self.op.blacklisted_os:
      helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")

    if self.op.master_netdev:
      master = self.cfg.GetMasterNode()
      feedback_fn("Shutting down master ip on the current netdev (%s)" %
                  self.cluster.master_netdev)
      result = self.rpc.call_node_stop_master(master, False)
      result.Raise("Could not disable the master ip")
      feedback_fn("Changing master_netdev from %s to %s" %
                  (self.cluster.master_netdev, self.op.master_netdev))
      self.cluster.master_netdev = self.op.master_netdev

    self.cfg.Update(self.cluster, feedback_fn)

    if self.op.master_netdev:
      feedback_fn("Starting the master ip on the new master netdev (%s)" %
                  self.op.master_netdev)
      result = self.rpc.call_node_start_master(master, False, False)
      if result.fail_msg:
        self.LogWarning("Could not re-enable the master ip on"
                        " the master, please restart manually: %s",
                        result.fail_msg)


def _UploadHelper(lu, nodes, fname):
  """Helper for uploading a file and showing warnings.

  """
  if os.path.exists(fname):
    result = lu.rpc.call_upload_file(nodes, fname)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (fname, to_node, msg))
        lu.proc.LogWarning(msg)


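# Usage sketch (hypothetical node names): _UploadHelper(lu, ["node2",
# "node3"], constants.ETC_HOSTS) copies /etc/hosts to both nodes and only
# warns on per-node failures instead of aborting, which suits the
# best-effort distribution done below.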
def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  @param lu: calling logical unit
  @param additional_nodes: list of nodes not in the config to distribute to
  @type additional_vm: boolean
  @param additional_vm: whether the additional nodes are vm-capable or not

  """
  # 1. Gather target nodes
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
  dist_nodes = lu.cfg.GetOnlineNodeList()
  nvm_nodes = lu.cfg.GetNonVmCapableNodeList()
  vm_nodes = [name for name in dist_nodes if name not in nvm_nodes]
  if additional_nodes is not None:
    dist_nodes.extend(additional_nodes)
    if additional_vm:
      vm_nodes.extend(additional_nodes)
  if myself.name in dist_nodes:
    dist_nodes.remove(myself.name)
  if myself.name in vm_nodes:
    vm_nodes.remove(myself.name)

  # 2. Gather files to distribute
  dist_files = set([constants.ETC_HOSTS,
                    constants.SSH_KNOWN_HOSTS_FILE,
                    constants.RAPI_CERT_FILE,
                    constants.RAPI_USERS_FILE,
                    constants.CONFD_HMAC_KEY,
                    constants.CLUSTER_DOMAIN_SECRET_FILE,
                   ])

  vm_files = set()
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
  for hv_name in enabled_hypervisors:
    hv_class = hypervisor.GetHypervisor(hv_name)
    vm_files.update(hv_class.GetAncillaryFiles())

  # 3. Perform the files upload
  for fname in dist_files:
    _UploadHelper(lu, dist_nodes, fname)
  for fname in vm_files:
    _UploadHelper(lu, vm_nodes, fname)


class LUClusterRedistConf(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
    _RedistributeAncillaryFiles(self)


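# _WaitForSync below polls call_blockdev_getmirrorstatus in a loop; with
# oneshot=True it reports the current status and returns after at most a few
# degraded-state retries instead of sleeping until the mirrors converge.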
def _WaitForSync(lu, instance, disks=None, oneshot=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks or disks is not None and not disks:
    return True

  disks = _ExpandCheckDisks(instance, disks)

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in disks:
    lu.cfg.SetDiskID(dev, node)

  # TODO: Convert to utils.Retry

  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.payload
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                      node, disks[i].iv_name)
        continue

      cumul_degraded = (cumul_degraded or
                        (mstat.is_degraded and mstat.sync_percent is None))
      if mstat.sync_percent is not None:
        done = False
        if mstat.estimated_time is not None:
          rem_time = ("%s remaining (estimated)" %
                      utils.FormatSeconds(mstat.estimated_time))
          max_time = mstat.estimated_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (disks[i].iv_name, mstat.sync_percent, rem_time))

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)

  result = True

  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      if ldisk:
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
      else:
        result = result and not rstats.payload.is_degraded

  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


class LUOobCommand(NoHooksLU):
  """Logical unit for OOB handling.

  """
  REQ_BGL = False
  _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - OOB is supported

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.nodes = []
    self.master_node = self.cfg.GetMasterNode()

    if self.op.node_names:
      if self.op.command in self._SKIP_MASTER:
        if self.master_node in self.op.node_names:
          master_node_obj = self.cfg.GetNodeInfo(self.master_node)
          master_oob_handler = _SupportsOob(self.cfg, master_node_obj)

          if master_oob_handler:
            additional_text = ("Run '%s %s %s' if you want to operate on the"
                               " master regardless") % (master_oob_handler,
                                                        self.op.command,
                                                        self.master_node)
          else:
            additional_text = "The master node does not support out-of-band"

          raise errors.OpPrereqError(("Operating on the master node %s is not"
                                      " allowed for %s\n%s") %
                                     (self.master_node, self.op.command,
                                      additional_text), errors.ECODE_INVAL)
    else:
      self.op.node_names = self.cfg.GetNodeList()
      if self.op.command in self._SKIP_MASTER:
        self.op.node_names.remove(self.master_node)

    if self.op.command in self._SKIP_MASTER:
      assert self.master_node not in self.op.node_names

    for node_name in self.op.node_names:
      node = self.cfg.GetNodeInfo(node_name)

      if node is None:
        raise errors.OpPrereqError("Node %s not found" % node_name,
                                   errors.ECODE_NOENT)
      else:
        self.nodes.append(node)

      if (not self.op.ignore_status and
          (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
        raise errors.OpPrereqError(("Cannot power off node %s because it is"
                                    " not marked offline") % node_name,
                                   errors.ECODE_STATE)

  def ExpandNames(self):
    """Gather locks we need.

    """
    if self.op.node_names:
      self.op.node_names = [_ExpandNodeName(self.cfg, name)
                            for name in self.op.node_names]
      lock_names = self.op.node_names
    else:
      lock_names = locking.ALL_SET

    self.needed_locks = {
      locking.LEVEL_NODE: lock_names,
      }

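  # Exec below returns one list per node, each holding (status, data) tuples,
  # e.g. [(constants.RS_NORMAL, "node5"), (constants.RS_UNAVAIL, None)] for a
  # node (name hypothetical) on which no OOB program is configured.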
  def Exec(self, feedback_fn):
    """Execute OOB and return result if we expect any.

    """
    master_node = self.master_node
    ret = []

    for node in self.nodes:
      node_entry = [(constants.RS_NORMAL, node.name)]
      ret.append(node_entry)

      oob_program = _SupportsOob(self.cfg, node)

      if not oob_program:
        node_entry.append((constants.RS_UNAVAIL, None))
        continue

      logging.info("Executing out-of-band command '%s' using '%s' on %s",
                   self.op.command, oob_program, node.name)
      result = self.rpc.call_run_oob(master_node, oob_program,
                                     self.op.command, node.name,
                                     self.op.timeout)

      if result.fail_msg:
        self.LogWarning("On node '%s' out-of-band RPC failed with: %s",
                        node.name, result.fail_msg)
        node_entry.append((constants.RS_NODATA, None))
      else:
        try:
          self._CheckPayload(result)
        except errors.OpExecError, err:
          self.LogWarning("The payload returned by '%s' is not valid: %s",
                          node.name, err)
          node_entry.append((constants.RS_NODATA, None))
        else:
          if self.op.command == constants.OOB_HEALTH:
            # For health we should log important events
            for item, status in result.payload:
              if status in [constants.OOB_STATUS_WARNING,
                            constants.OOB_STATUS_CRITICAL]:
                self.LogWarning("On node '%s' item '%s' has status '%s'",
                                node.name, item, status)

          if self.op.command == constants.OOB_POWER_ON:
            node.powered = True
          elif self.op.command == constants.OOB_POWER_OFF:
            node.powered = False
          elif self.op.command == constants.OOB_POWER_STATUS:
            powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
            if powered != node.powered:
              logging.warning(("Recorded power state (%s) of node '%s' does not"
                               " match actual power state (%s)"), node.powered,
                              node.name, powered)

          # For configuration changing commands we should update the node
          if self.op.command in (constants.OOB_POWER_ON,
                                 constants.OOB_POWER_OFF):
            self.cfg.Update(node, feedback_fn)

          node_entry.append((constants.RS_NORMAL, result.payload))

    return ret

  def _CheckPayload(self, result):
    """Checks if the payload is valid.

    @param result: RPC result
    @raises errors.OpExecError: If payload is not valid

    """
    errs = []
    if self.op.command == constants.OOB_HEALTH:
      if not isinstance(result.payload, list):
        errs.append("command 'health' is expected to return a list but got %s" %
                    type(result.payload))
      else:
        for item, status in result.payload:
          if status not in constants.OOB_STATUSES:
            errs.append("health item '%s' has invalid status '%s'" %
                        (item, status))

    if self.op.command == constants.OOB_POWER_STATUS:
      if not isinstance(result.payload, dict):
        errs.append("power-status is expected to return a dict but got %s" %
                    type(result.payload))

    if self.op.command in [
        constants.OOB_POWER_ON,
        constants.OOB_POWER_OFF,
        constants.OOB_POWER_CYCLE,
        ]:
      if result.payload is not None:
        errs.append("%s is expected to not return payload but got '%s'" %
                    (self.op.command, result.payload))

    if errs:
      raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
                               utils.CommaJoin(errs))


class LUOsDiagnose(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  REQ_BGL = False
  _HID = "hidden"
  _BLK = "blacklisted"
  _VLD = "valid"
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", _VLD, "node_status", "variants",
                                   "parameters", "api_versions", _HID, _BLK)

  def CheckArguments(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported",
                                 errors.ECODE_INVAL)

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def ExpandNames(self):
    # Lock all nodes, in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    self.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  @staticmethod
  def _DiagnoseByOS(rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another
        map, with nodes as keys and tuples of (path, status, diagnose,
        variants, parameters, api_versions) as values, eg::

          {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
                                     (/srv/..., False, "invalid api")],
                           "node2": [(/srv/..., True, "", [], [])]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].fail_msg]
    for node_name, nr in rlist.items():
      if nr.fail_msg or not nr.payload:
        continue
      for (name, path, status, diagnose, variants,
           params, api_versions) in nr.payload:
        if name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[name] = {}
          for nname in good_nodes:
            all_os[name][nname] = []
        # convert params from [name, help] to (name, help)
        params = [tuple(v) for v in params]
        all_os[name][node_name].append((path, status, diagnose,
                                        variants, params, api_versions))
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    valid_nodes = [node.name
                   for node in self.cfg.GetAllNodesInfo().values()
                   if not node.offline and node.vm_capable]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    pol = self._DiagnoseByOS(node_data)
    output = []
    cluster = self.cfg.GetClusterInfo()

    for os_name in utils.NiceSort(pol.keys()):
      os_data = pol[os_name]
      row = []
      valid = True
      (variants, params, api_versions) = null_state = (set(), set(), set())
      for idx, osl in enumerate(os_data.values()):
        valid = bool(valid and osl and osl[0][1])
        if not valid:
          (variants, params, api_versions) = null_state
          break
        node_variants, node_params, node_api = osl[0][3:6]
        if idx == 0: # first entry
          variants = set(node_variants)
          params = set(node_params)
          api_versions = set(node_api)
        else: # keep consistency
          variants.intersection_update(node_variants)
          params.intersection_update(node_params)
          api_versions.intersection_update(node_api)

      is_hid = os_name in cluster.hidden_os
      is_blk = os_name in cluster.blacklisted_os
      if ((self._HID not in self.op.output_fields and is_hid) or
          (self._BLK not in self.op.output_fields and is_blk) or
          (self._VLD not in self.op.output_fields and not valid)):
        continue

      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == self._VLD:
          val = valid
        elif field == "node_status":
          # this is just a copy of the dict
          val = {}
          for node_name, nos_list in os_data.items():
            val[node_name] = nos_list
        elif field == "variants":
          val = utils.NiceSort(list(variants))
        elif field == "parameters":
          val = list(params)
        elif field == "api_versions":
          val = list(api_versions)
        elif field == self._HID:
          val = is_hid
        elif field == self._BLK:
          val = is_blk
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output


class LUNodeRemove(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    try:
      all_nodes.remove(self.op.node_name)
    except ValueError:
      logging.warning("Node %s which is about to be removed not found"
                      " in the all nodes list", self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    node = self.cfg.GetNodeInfo(self.op.node_name)
    assert node is not None

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.",
                                 errors.ECODE_INVAL)

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name,
                                   errors.ECODE_INVAL)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self, exceptions=[node.name])
    self.context.RemoveNode(node.name)

    # Run post hooks on the node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
    except:
      # pylint: disable-msg=W0702
      self.LogWarning("Errors occurred running hooks on %s" % node.name)

    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
    msg = result.fail_msg
    if msg:
      self.LogWarning("Errors encountered on the remote node while leaving"
                      " the cluster: %s", msg)

    # Remove node from our /etc/hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      master_node = self.cfg.GetMasterNode()
      result = self.rpc.call_etc_hosts_modify(master_node,
                                              constants.ETC_HOSTS_REMOVE,
                                              node.name, None)
      result.Raise("Can't update hosts file with new host data")
      _RedistributeAncillaryFiles(self)


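# _NodeQuery below only acquires node locks when live data (query.NQ_LIVE) is
# among the requested fields; purely static queries run unlocked against the
# in-memory configuration.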
class _NodeQuery(_QueryBase):
  FIELDS = query.NODE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks[locking.LEVEL_NODE] = 1

    if self.names:
      self.wanted = _GetWantedNodes(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.NQ_LIVE in self.requested_data)

    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      lu.needed_locks[locking.LEVEL_NODE] = self.wanted

  def DeclareLocks(self, lu, level):
    pass

  def _GetQueryData(self, lu):
    """Computes the list of nodes and their attributes.

    """
    all_info = lu.cfg.GetAllNodesInfo()

    nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)

    # Gather data as requested
    if query.NQ_LIVE in self.requested_data:
      # filter out non-vm_capable nodes
      toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]

      node_data = lu.rpc.call_node_info(toquery_nodes, lu.cfg.GetVGName(),
                                        lu.cfg.GetHypervisorType())
      live_data = dict((name, nresult.payload)
                       for (name, nresult) in node_data.items()
                       if not nresult.fail_msg and nresult.payload)
    else:
      live_data = None

    if query.NQ_INST in self.requested_data:
      node_to_primary = dict([(name, set()) for name in nodenames])
      node_to_secondary = dict([(name, set()) for name in nodenames])

      inst_data = lu.cfg.GetAllInstancesInfo()

      for inst in inst_data.values():
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)
    else:
      node_to_primary = None
      node_to_secondary = None

    if query.NQ_OOB in self.requested_data:
      oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
                         for name, node in all_info.iteritems())
    else:
      oob_support = None

    if query.NQ_GROUP in self.requested_data:
      groups = lu.cfg.GetAllNodeGroupsInfo()
    else:
      groups = {}

    return query.NodeQueryData([all_info[name] for name in nodenames],
                               live_data, lu.cfg.GetMasterNode(),
                               node_to_primary, node_to_secondary, groups,
                               oob_support, lu.cfg.GetClusterInfo())


class LUNodeQuery(NoHooksLU):
  """Logical unit for querying nodes.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
                         self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.nq.ExpandNames(self)

  def Exec(self, feedback_fn):
    return self.nq.OldStyleQuery(self)


class LUNodeQueryvols(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.acquired_locks[locking.LEVEL_NODE]
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      nresult = volumes[node]
      if nresult.offline:
        continue
      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
        continue

      node_vols = nresult.payload[:]
      node_vols.sort(key=lambda vol: vol['dev'])

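      # Each payload entry is a dict describing one logical volume; the field
      # mapping below relies on (at least) its 'dev', 'vg', 'name' and 'size'
      # keys.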
      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output


class LUNodeQueryStorage(NoHooksLU):
  """Logical unit for getting information on storage units on node(s).

  """
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
  REQ_BGL = False

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

    # Always get name to sort by
    if constants.SF_NAME in self.op.output_fields:
      fields = self.op.output_fields[:]
    else:
      fields = [constants.SF_NAME] + self.op.output_fields

    # Never ask for node or type as it's only known to the LU
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
      while extra in fields:
        fields.remove(extra)

    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
    name_idx = field_idx[constants.SF_NAME]

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    data = self.rpc.call_storage_list(self.nodes,
                                      self.op.storage_type, st_args,
                                      self.op.name, fields)

    result = []

    for node in utils.NiceSort(self.nodes):
      nresult = data[node]
      if nresult.offline:
        continue

      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
        continue

      rows = dict([(row[name_idx], row) for row in nresult.payload])

      for name in utils.NiceSort(rows.keys()):
        row = rows[name]

        out = []

        for field in self.op.output_fields:
          if field == constants.SF_NODE:
            val = node
          elif field == constants.SF_TYPE:
            val = self.op.storage_type
          elif field in field_idx:
            val = row[field_idx[field]]
          else:
            raise errors.ParameterError(field)

          out.append(val)

        result.append(out)

    return result


class _InstanceQuery(_QueryBase):
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks[locking.LEVEL_INSTANCE] = 1
    lu.share_locks[locking.LEVEL_NODE] = 1

    if self.names:
      self.wanted = _GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, lu, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      lu._LockInstancesNodes() # pylint: disable-msg=W0212

  def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
    cluster = lu.cfg.GetClusterInfo()
    all_info = lu.cfg.GetAllInstancesInfo()

    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)

    instance_list = [all_info[name] for name in instance_names]
    nodes = frozenset(itertools.chain(*(inst.all_nodes
                                        for inst in instance_list)))
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
    bad_nodes = []
    offline_nodes = []
    wrongnode_inst = set()

    # Gather data as requested
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_nodes.append(name)
        if result.fail_msg:
          bad_nodes.append(name)
        elif result.payload:
          for inst in result.payload:
            if all_info[inst].primary_node == name:
              live_data.update(result.payload)
            else:
              wrongnode_inst.add(inst)
        # else no instance is alive
    else:
      live_data = {}

    if query.IQ_DISKUSAGE in self.requested_data:
      disk_usage = dict((inst.name,
                         _ComputeDiskSize(inst.disk_template,
                                          [{"size": disk.size}
                                           for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.name in live_data:
          # Instance is running
          consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
        else:
          consinfo[inst.name] = None
      assert set(consinfo.keys()) == set(instance_names)
    else:
      consinfo = None

    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_nodes, bad_nodes,
                                   live_data, wrongnode_inst, consinfo)


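# LUQuery and LUQueryFields below are thin dispatchers: _GetQueryImplementation
# maps self.op.what (e.g. "node" or "instance") to one of the _QueryBase
# subclasses above, which do the actual work.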
class LUQuery(NoHooksLU):
  """Query for resources/items of a certain kind.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    qcls = _GetQueryImplementation(self.op.what)

    self.impl = qcls(self.op.filter, self.op.fields, False)

  def ExpandNames(self):
    self.impl.ExpandNames(self)

  def DeclareLocks(self, level):
    self.impl.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.impl.NewStyleQuery(self)


class LUQueryFields(NoHooksLU):
  """Query for resources/items of a certain kind.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.qcls = _GetQueryImplementation(self.op.what)

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    return self.qcls.FieldsQuery(self.op.fields)


class LUNodeModifyStorage(NoHooksLU):
  """Logical unit for modifying a storage volume on a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type

    try:
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
    except KeyError:
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " modified" % storage_type,
                                 errors.ECODE_INVAL)

    diff = set(self.op.changes.keys()) - modifiable
    if diff:
      raise errors.OpPrereqError("The following fields can not be modified for"
                                 " storage units of type '%s': %r" %
                                 (storage_type, list(diff)),
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_name,
      }

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_modify(self.op.node_name,
                                          self.op.storage_type, st_args,
                                          self.op.name, self.op.changes)
    result.Raise("Failed to modify storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))


class LUNodeAdd(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _NFLAGS = ["master_capable", "vm_capable"]

  def CheckArguments(self):
    self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
    # validate/normalize the node name
    self.hostname = netutils.GetHostname(name=self.op.node_name,
                                         family=self.primary_ip_family)
    self.op.node_name = self.hostname.name
    if self.op.readd and self.op.group:
      raise errors.OpPrereqError("Cannot pass a node group when a node is"
                                 " being readded", errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      "MASTER_CAPABLE": str(self.op.master_capable),
      "VM_CAPABLE": str(self.op.vm_capable),
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signaled by raising errors.OpPrereqError.

    """
    cfg = self.cfg
    hostname = self.hostname
    node = hostname.name
    primary_ip = self.op.primary_ip = hostname.ip
    if self.op.secondary_ip is None:
      if self.primary_ip_family == netutils.IP6Address.family:
        raise errors.OpPrereqError("When using an IPv6 primary address, a"
                                   " valid IPv4 address must be given as"
                                   " secondary", errors.ECODE_INVAL)
      self.op.secondary_ip = primary_ip

    secondary_ip = self.op.secondary_ip
    if not netutils.IP4Address.IsValid(secondary_ip):
      raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
                                 " address" % secondary_ip, errors.ECODE_INVAL)

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node, errors.ECODE_EXISTS)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
                                 errors.ECODE_NOENT)

    self.changed_primary_ip = False

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if existing_node.secondary_ip != secondary_ip:
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before",
                                     errors.ECODE_INVAL)
        if existing_node.primary_ip != primary_ip:
          self.changed_primary_ip = True

        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name,
                                   errors.ECODE_NOTUNIQUE)

    # After this 'if' block, None is no longer a valid value for the
    # _capable op attributes
    if self.op.readd:
      old_node = self.cfg.GetNodeInfo(node)
      assert old_node is not None, "Can't retrieve locked node %s" % node
      for attr in self._NFLAGS:
        if getattr(self.op, attr) is None:
          setattr(self.op, attr, getattr(old_node, attr))
    else:
      for attr in self._NFLAGS:
        if getattr(self.op, attr) is None:
          setattr(self.op, attr, True)

    if self.op.readd and not self.op.vm_capable:
      pri, sec = cfg.GetNodeInstances(node)
      if pri or sec:
        raise errors.OpPrereqError("Node %s being re-added with vm_capable"
                                   " flag set to false, but it already holds"
                                   " instances" % node,
                                   errors.ECODE_STATE)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no secondary ip but the"
                                   " new node has one",
                                   errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("The master has a secondary ip but the"
                                   " new node doesn't have one",
                                   errors.ECODE_INVAL)

    # checks reachability
    if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping",
                                 errors.ECODE_ENVIRON)

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                              source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to node daemon port",
                                   errors.ECODE_ENVIRON)

    if self.op.readd:
      exceptions = [node]
    else:
      exceptions = []

    if self.op.master_capable:
      self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
    else:
      self.master_candidate = False

    if self.op.readd:
      self.new_node = old_node
    else:
      node_group = cfg.LookupNodeGroup(self.op.group)
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False,
                                   group=node_group)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # We're adding a new node, so we assume it's powered
    new_node.powered = True

    # for re-adds, reset the offline/drained/master-candidate flags;
    # we need to reset here, otherwise offline would prevent RPC calls
    # later in the procedure; this also means that if the re-add
    # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
      self.LogInfo("Readding a node, the offline/drained flags were reset")
      # if we demote the node, we do cleanup later in the procedure
      new_node.master_candidate = self.master_candidate
      if self.changed_primary_ip:
        new_node.primary_ip = self.op.primary_ip

    # copy the master/vm_capable flags
    for attr in self._NFLAGS:
      setattr(new_node, attr, getattr(self.op, attr))

    # notify the user about any possible mc promotion
    if new_node.master_candidate:
      self.LogInfo("Node will be a master candidate")

    if self.op.ndparams:
      new_node.ndparams = self.op.ndparams
    else:
      new_node.ndparams = {}

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise("Can't get version information from node %s" % node)
    if constants.PROTOCOL_VERSION == result.payload:
      logging.info("Communication to node %s fine, sw version %s match",
                   node, result.payload)
    else:
      raise errors.OpExecError("Version mismatch master version %s,"
                               " node version %s" %
                               (constants.PROTOCOL_VERSION, result.payload))

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      master_node = self.cfg.GetMasterNode()
      result = self.rpc.call_etc_hosts_modify(master_node,
                                              constants.ETC_HOSTS_ADD,
                                              self.hostname.name,
                                              self.hostname.ip)
      result.Raise("Can't update hosts file with new host data")

    if new_node.secondary_ip != new_node.primary_ip:
      _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
                               False)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      constants.NV_NODELIST: [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
      if nl_payload:
        for failed in nl_payload:
          feedback_fn("ssh/hostname verification failed"
                      " (checking from %s): %s" %
                      (verifier, nl_payload[failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    if self.op.readd:
      _RedistributeAncillaryFiles(self)
      self.context.ReaddNode(new_node)
4313
      # make sure we redistribute the config
4314
      self.cfg.Update(new_node, feedback_fn)
4315
      # and make sure the new node will not have old files around
4316
      if not new_node.master_candidate:
4317
        result = self.rpc.call_node_demote_from_mc(new_node.name)
4318
        msg = result.fail_msg
4319
        if msg:
4320
          self.LogWarning("Node failed to demote itself from master"
4321
                          " candidate status: %s" % msg)
4322
    else:
4323
      _RedistributeAncillaryFiles(self, additional_nodes=[node],
4324
                                  additional_vm=self.op.vm_capable)
4325
      self.context.AddNode(new_node, self.proc.GetECId())
4326

    
4327

    
4328
class LUNodeSetParams(LogicalUnit):
4329
  """Modifies the parameters of a node.
4330

4331
  @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
4332
      to the node role (as _ROLE_*)
4333
  @cvar _R2F: a dictionary from node role to tuples of flags
4334
  @cvar _FLAGS: a list of attribute names corresponding to the flags
4335

4336
  """
4337
  HPATH = "node-modify"
4338
  HTYPE = constants.HTYPE_NODE
4339
  REQ_BGL = False
4340
  (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
4341
  _F2R = {
4342
    (True, False, False): _ROLE_CANDIDATE,
4343
    (False, True, False): _ROLE_DRAINED,
4344
    (False, False, True): _ROLE_OFFLINE,
4345
    (False, False, False): _ROLE_REGULAR,
4346
    }
4347
  _R2F = dict((v, k) for k, v in _F2R.items())
4348
  _FLAGS = ["master_candidate", "drained", "offline"]
4349

    
4350
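  # Illustrative example (editor's note, not part of the original code):
  # the role mapping collapses the three mutually exclusive flags into one
  # role and back, e.g.
  #   _F2R[(True, False, False)] == _ROLE_CANDIDATE
  #   _R2F[_ROLE_REGULAR] == (False, False, False)
  # so at most one flag may be True at any time.
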
  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
                self.op.master_capable, self.op.vm_capable,
                self.op.secondary_ip, self.op.ndparams]
    if all_mods.count(None) == len(all_mods):
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)
    if all_mods.count(True) > 1:
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time",
                                 errors.ECODE_INVAL)

    # Boolean value that tells us whether we might be demoting from MC
    self.might_demote = (self.op.master_candidate == False or
                         self.op.offline == True or
                         self.op.drained == True or
                         self.op.master_capable == False)

    if self.op.secondary_ip:
      if not netutils.IP4Address.IsValid(self.op.secondary_ip):
        raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
                                   " address" % self.op.secondary_ip,
                                   errors.ECODE_INVAL)

    self.lock_all = self.op.auto_promote and self.might_demote
    self.lock_instances = self.op.secondary_ip is not None

  def ExpandNames(self):
    if self.lock_all:
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    else:
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

    if self.lock_instances:
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

  def DeclareLocks(self, level):
    # If we have locked all instances, before waiting to lock nodes, release
    # all the ones living on nodes unrelated to the current operation.
    if level == locking.LEVEL_NODE and self.lock_instances:
      instances_release = []
      instances_keep = []
      self.affected_instances = []
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
        for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
          instance = self.context.cfg.GetInstanceInfo(instance_name)
          i_mirrored = instance.disk_template in constants.DTS_NET_MIRROR
          if i_mirrored and self.op.node_name in instance.all_nodes:
            instances_keep.append(instance_name)
            self.affected_instances.append(instance)
          else:
            instances_release.append(instance_name)
        if instances_release:
          self.context.glm.release(locking.LEVEL_INSTANCE, instances_release)
          self.acquired_locks[locking.LEVEL_INSTANCE] = instances_keep

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      "MASTER_CAPABLE": str(self.op.master_capable),
      "VM_CAPABLE": str(self.op.vm_capable),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if (self.op.master_candidate is not None or
        self.op.drained is not None or
        self.op.offline is not None):
      # we can't change the master's node flags
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master role can be changed"
                                   " only via master-failover",
                                   errors.ECODE_INVAL)

    if self.op.master_candidate and not node.master_capable:
      raise errors.OpPrereqError("Node %s is not master capable, cannot make"
                                 " it a master candidate" % node.name,
                                 errors.ECODE_STATE)

    if self.op.vm_capable == False:
      (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
      if ipri or isec:
        raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
                                   " the vm_capable flag" % node.name,
                                   errors.ECODE_STATE)

    if node.master_candidate and self.might_demote and not self.lock_all:
      assert not self.op.auto_promote, "auto_promote set but lock_all not"
      # check if after removing the current node, we're missing master
      # candidates
      (mc_remaining, mc_should, _) = \
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
      if mc_remaining < mc_should:
        raise errors.OpPrereqError("Not enough master candidates, please"
                                   " pass auto promote option to allow"
                                   " promotion", errors.ECODE_STATE)

    self.old_flags = old_flags = (node.master_candidate,
                                  node.drained, node.offline)
    assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
    self.old_role = old_role = self._F2R[old_flags]

    # Check for ineffective changes
    for attr in self._FLAGS:
      if (getattr(self.op, attr) == False and getattr(node, attr) == False):
        self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
        setattr(self.op, attr, None)

    # Past this point, any flag change to False means a transition
    # away from the respective state, as only real changes are kept

    # TODO: We might query the real power state if it supports OOB
    if _SupportsOob(self.cfg, node):
      if self.op.offline is False and not (node.powered or
                                           self.op.powered == True):
        raise errors.OpPrereqError(("Please power on node %s first before you"
                                    " can reset offline state") %
                                   self.op.node_name)
    elif self.op.powered is not None:
      raise errors.OpPrereqError(("Unable to change powered state for node %s"
                                  " which does not support out-of-band"
                                  " handling") % self.op.node_name)

    # If we're being deofflined/drained, we'll MC ourself if needed
    if (self.op.drained == False or self.op.offline == False or
        (self.op.master_capable and not node.master_capable)):
      if _DecideSelfPromotion(self):
        self.op.master_candidate = True
        self.LogInfo("Auto-promoting node to master candidate")

    # If we're no longer master capable, we'll demote ourselves from MC
    if self.op.master_capable == False and node.master_candidate:
      self.LogInfo("Demoting from master candidate")
      self.op.master_candidate = False

    # Compute new role
    assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
    if self.op.master_candidate:
      new_role = self._ROLE_CANDIDATE
    elif self.op.drained:
      new_role = self._ROLE_DRAINED
    elif self.op.offline:
      new_role = self._ROLE_OFFLINE
    elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
      # False is still in new flags, which means we're un-setting (the
      # only) True flag
      new_role = self._ROLE_REGULAR
    else: # no new flags, nothing, keep old role
      new_role = old_role

    self.new_role = new_role

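    # Illustrative example (editor's note, not part of the original code):
    # draining a master candidate arrives here with old_role ==
    # _ROLE_CANDIDATE and self.op.drained == True, which selects
    # new_role == _ROLE_DRAINED above.
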
    if old_role == self._ROLE_OFFLINE and new_role != old_role:
      # Trying to transition out of offline status
      result = self.rpc.call_version([node.name])[node.name]
      if result.fail_msg:
        raise errors.OpPrereqError("Node %s is being de-offlined but fails"
                                   " to report its version: %s" %
                                   (node.name, result.fail_msg),
                                   errors.ECODE_STATE)
      else:
        self.LogWarning("Transitioning node from offline to online state"
                        " without using re-add. Please make sure the node"
                        " is healthy!")

    if self.op.secondary_ip:
      # Ok even without locking, because this can't be changed by any LU
      master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
      master_singlehomed = master.secondary_ip == master.primary_ip
      if master_singlehomed and self.op.secondary_ip:
        raise errors.OpPrereqError("Cannot change the secondary ip on a single"
                                   " homed cluster", errors.ECODE_INVAL)

      if node.offline:
        if self.affected_instances:
          raise errors.OpPrereqError("Cannot change secondary ip: offline"
                                     " node has instances (%s) configured"
                                     " to use it" % self.affected_instances)
      else:
        # On online nodes, check that no instances are running, and that
        # the node has the new ip and we can reach it.
        for instance in self.affected_instances:
          _CheckInstanceDown(self, instance, "cannot change secondary ip")

        _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
        if master.name != node.name:
          # check reachability from master secondary ip to new secondary ip
          if not netutils.TcpPing(self.op.secondary_ip,
                                  constants.DEFAULT_NODED_PORT,
                                  source=master.secondary_ip):
            raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                       " based ping to node daemon port",
                                       errors.ECODE_ENVIRON)

    if self.op.ndparams:
      new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = new_ndparams

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node
    old_role = self.old_role
    new_role = self.new_role

    result = []

    if self.op.ndparams:
      node.ndparams = self.new_ndparams

    if self.op.powered is not None:
      node.powered = self.op.powered

    for attr in ["master_capable", "vm_capable"]:
      val = getattr(self.op, attr)
      if val is not None:
        setattr(node, attr, val)
        result.append((attr, str(val)))

    if new_role != old_role:
      # Tell the node to demote itself, if no longer MC and not offline
      if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
        msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself: %s", msg)

      new_flags = self._R2F[new_role]
      for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
        if of != nf:
          result.append((desc, str(nf)))
      (node.master_candidate, node.drained, node.offline) = new_flags

      # we locked all nodes, we adjust the CP before updating this node
      if self.lock_all:
        _AdjustCandidatePool(self, [node.name])

    if self.op.secondary_ip:
      node.secondary_ip = self.op.secondary_ip
      result.append(("secondary_ip", self.op.secondary_ip))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node, feedback_fn)

    # this will trigger job queue propagation or cleanup if the mc
    # flag changed
    if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
      self.context.ReaddNode(node)

    return result


class LUNodePowercycle(NoHooksLU):
  """Powercycles a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
      raise errors.OpPrereqError("The node is the master and the force"
                                 " parameter was not set",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    """Locking for PowercycleNode.

    This is a last-resort option and shouldn't block on other
    jobs. Therefore, we grab no locks.

    """
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Reboots a node.

    """
    result = self.rpc.call_node_powercycle(self.op.node_name,
                                           self.cfg.GetHypervisorType())
    result.Raise("Failed to schedule the reboot")
    return result.payload


class LUClusterQuery(NoHooksLU):
  """Query cluster configuration.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    os_hvp = {}

    # Filter just for enabled hypervisors
    for os_name, hv_dict in cluster.os_hvp.items():
      os_hvp[os_name] = {}
      for hv_name, hv_params in hv_dict.items():
        if hv_name in cluster.enabled_hypervisors:
          os_hvp[os_name][hv_name] = hv_params

    # Convert ip_family to ip_version
    primary_ip_version = constants.IP4_VERSION
    if cluster.primary_ip_family == netutils.IP6Address.family:
      primary_ip_version = constants.IP6_VERSION

    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": max(constants.OS_API_VERSIONS),
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.enabled_hypervisors[0],
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
                        for hypervisor_name in cluster.enabled_hypervisors]),
      "os_hvp": os_hvp,
      "beparams": cluster.beparams,
      "osparams": cluster.osparams,
      "nicparams": cluster.nicparams,
      "ndparams": cluster.ndparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "master_netdev": cluster.master_netdev,
      "volume_group_name": cluster.volume_group_name,
      "drbd_usermode_helper": cluster.drbd_usermode_helper,
      "file_storage_dir": cluster.file_storage_dir,
      "shared_file_storage_dir": cluster.shared_file_storage_dir,
      "maintain_node_health": cluster.maintain_node_health,
      "ctime": cluster.ctime,
      "mtime": cluster.mtime,
      "uuid": cluster.uuid,
      "tags": list(cluster.GetTags()),
      "uid_pool": cluster.uid_pool,
      "default_iallocator": cluster.default_iallocator,
      "reserved_lvs": cluster.reserved_lvs,
      "primary_ip_version": primary_ip_version,
      "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
      "hidden_os": cluster.hidden_os,
      "blacklisted_os": cluster.blacklisted_os,
      }

    return result


class LUClusterConfigQuery(NoHooksLU):
  """Return configuration values.

  """
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
                                  "watcher_pause", "volume_group_name")

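  # Illustrative example (editor's note, not part of the original code):
  # output_fields ["cluster_name", "master_node"] yields the values in the
  # same order, e.g. ["cluster.example.com", "node1.example.com"] (names
  # hypothetical).
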
  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      elif field == "watcher_pause":
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
      elif field == "volume_group_name":
        entry = self.cfg.GetVGName()
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values


class LUInstanceActivateDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = \
              _AssembleInstanceDisks(self, self.instance,
                                     ignore_size=self.op.ignore_size)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
                           ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type disks: list of L{objects.Disk} or None
  @param disks: which disks to assemble (or all, if None)
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: a pair (disks_ok, device_info): C{disks_ok} is False if the
      operation failed, and C{device_info} is a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  disks = _ExpandCheckDisks(instance, disks)

  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for idx, inst_disk in enumerate(disks):
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False, idx)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for idx, inst_disk in enumerate(disks):
    dev_path = None

    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True, idx)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
      else:
        dev_path = result.payload

    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info


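# Illustrative example for _AssembleInstanceDisks above (editor's note, not
# part of the original code): for a healthy single-disk DRBD instance the
# call returns something like
#   (True, [("node1.example.com", "disk/0", "/dev/drbd0")])
# where the node name and device path are hypothetical values.

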
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                       ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUInstanceDeactivateDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    """
    instance = self.instance
    if self.op.force:
      _ShutdownInstanceDisks(self, instance)
    else:
      _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance, disks=None):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
  _ShutdownInstanceDisks(lu, instance, disks=disks)


def _ExpandCheckDisks(instance, disks):
  """Return the instance disks selected by the disks list.

  @type disks: list of L{objects.Disk} or None
  @param disks: selected disks
  @rtype: list of L{objects.Disk}
  @return: selected instance disks to act on

  """
  if disks is None:
    return instance.disks
  else:
    if not set(disks).issubset(instance.disks):
      raise errors.ProgrammerError("Can only act on disks belonging to the"
                                   " target instance")
    return disks


def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are
  ignored.

  """
  all_result = True
  disks = _ExpandCheckDisks(instance, disks)

  for disk in disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if ((node == instance.primary_node and not ignore_primary) or
            (node != instance.primary_node and not result.offline)):
          all_result = False
  return all_result


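# Illustrative note for _ShutdownInstanceDisks above (editor's note, not
# part of the original code): a failed shutdown on the primary node makes
# the function return False unless ignore_primary is true, while failures
# reported from offline secondary nodes do not affect the result.

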
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], None, hypervisor_name)
  nodeinfo[node].Raise("Can't get data from node %s" % node,
                       prereq=True, ecode=errors.ECODE_ENVIRON)
  free_mem = nodeinfo[node].payload.get('memory_free', None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem),
                               errors.ECODE_ENVIRON)
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem),
                               errors.ECODE_NORES)


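# Illustrative example for _CheckNodeFreeMemory above (editor's note, not
# part of the original code): requesting 2048 MiB on a node whose
# memory_free payload is 1024 raises OpPrereqError with errors.ECODE_NORES.

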
def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
  """Checks if nodes have enough free disk space in all the VGs.

  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type req_sizes: C{dict}
  @param req_sizes: the hash of vg and corresponding amount of disk in
      MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
      or we cannot check the node

  """
  for vg, req_size in req_sizes.items():
    _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)


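# Illustrative example for _CheckNodesFreeDiskPerVG above (editor's note,
# not part of the original code): req_sizes maps volume groups to MiB, so
#   _CheckNodesFreeDiskPerVG(lu, ["node1", "node2"], {"xenvg": 10240})
# checks both nodes for 10 GiB of free space in the (hypothetical) volume
# group "xenvg".

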
def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
  """Checks if nodes have enough free disk space in the specified VG.

  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type vg: C{str}
  @param vg: the volume group to check
  @type requested: C{int}
  @param requested: the amount of disk in MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(nodenames, vg, None)
  for node in nodenames:
    info = nodeinfo[node]
    info.Raise("Cannot get current information from node %s" % node,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    vg_free = info.payload.get("vg_free", None)
    if not isinstance(vg_free, int):
      raise errors.OpPrereqError("Can't compute free disk space on node"
                                 " %s for vg %s, result was '%s'" %
                                 (node, vg, vg_free), errors.ECODE_ENVIRON)
    if requested > vg_free:
      raise errors.OpPrereqError("Not enough disk space on target node %s"
                                 " vg %s: required %d MiB, available %d MiB" %
                                 (node, vg, requested, vg_free),
                                 errors.ECODE_NORES)


class LUInstanceStartup(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    # extra beparams
    if self.op.beparams:
      # fill the beparams dict
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # extra hvparams
    if self.op.hvparams:
      # check hypervisor parameter syntax (locally)
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
      filled_hvp = cluster.FillHV(instance)
      filled_hvp.update(self.op.hvparams)
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)

    self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline

    if self.primary_offline and self.op.ignore_offline_nodes:
      self.proc.LogWarning("Ignoring offline primary node")

      if self.op.hvparams or self.op.beparams:
        self.proc.LogWarning("Overridden parameters are ignored")
    else:
      _CheckNodeOnline(self, instance.primary_node)

      bep = self.cfg.GetClusterInfo().FillBE(instance)

      # check bridges existence
      _CheckInstanceBridgesExist(self, instance)

      remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                instance.name,
                                                instance.hypervisor)
      remote_info.Raise("Error checking node %s" % instance.primary_node,
                        prereq=True, ecode=errors.ECODE_ENVIRON)
      if not remote_info.payload: # not running already
        _CheckNodeFreeMemory(self, instance.primary_node,
                             "starting instance %s" % instance.name,
                             bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force

    self.cfg.MarkInstanceUp(instance.name)

    if self.primary_offline:
      assert self.op.ignore_offline_nodes
      self.proc.LogInfo("Primary node offline, marked instance as started")
    else:
      node_current = instance.primary_node

      _StartInstanceDisks(self, instance, force)

      result = self.rpc.call_instance_start(node_current, instance,
                                            self.op.hvparams, self.op.beparams)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance: %s" % msg)


class LUInstanceReboot(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type

    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node)
    instance_running = bool(remote_info.payload)

    node_current = instance.primary_node

    if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                                            constants.INSTANCE_REBOOT_HARD]:
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type,
                                             self.op.shutdown_timeout)
      result.Raise("Could not reboot instance")
    else:
      if instance_running:
        result = self.rpc.call_instance_shutdown(node_current, instance,
                                                 self.op.shutdown_timeout)
        result.Raise("Could not shutdown instance for full reboot")
        _ShutdownInstanceDisks(self, instance)
      else:
        self.LogInfo("Instance %s was already stopped, starting now",
                     instance.name)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)


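# Illustrative summary for LUInstanceReboot above (editor's note, not part
# of the original code): soft and hard reboots of a running instance are a
# single call_instance_reboot RPC, while a full reboot (or rebooting a
# stopped instance) is implemented as shutdown, disk restart and start.

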
class LUInstanceShutdown(LogicalUnit):
5278
  """Shutdown an instance.
5279

5280
  """
5281
  HPATH = "instance-stop"
5282
  HTYPE = constants.HTYPE_INSTANCE
5283
  REQ_BGL = False
5284

    
5285
  def ExpandNames(self):
5286
    self._ExpandAndLockInstance()
5287

    
5288
  def BuildHooksEnv(self):
5289
    """Build hooks env.
5290

5291
    This runs on master, primary and secondary nodes of the instance.
5292

5293
    """
5294
    env = _BuildInstanceHookEnvByObject(self, self.instance)
5295
    env["TIMEOUT"] = self.op.timeout
5296
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5297
    return env, nl, nl
5298

    
5299
  def CheckPrereq(self):
5300
    """Check prerequisites.
5301

5302
    This checks that the instance is in the cluster.
5303

5304
    """
5305
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5306
    assert self.instance is not None, \
5307
      "Cannot retrieve locked instance %s" % self.op.instance_name
5308

    
5309
    self.primary_offline = \
5310
      self.cfg.GetNodeInfo(self.instance.primary_node).offline
5311

    
5312
    if self.primary_offline and self.op.ignore_offline_nodes:
5313
      self.proc.LogWarning("Ignoring offline primary node")
5314
    else:
5315
      _CheckNodeOnline(self, self.instance.primary_node)
5316

    
5317
  def Exec(self, feedback_fn):
5318
    """Shutdown the instance.
5319

5320
    """
5321
    instance = self.instance
5322
    node_current = instance.primary_node
5323
    timeout = self.op.timeout
5324

    
5325
    self.cfg.MarkInstanceDown(instance.name)
5326

    
5327
    if self.primary_offline:
5328
      assert self.op.ignore_offline_nodes
5329
      self.proc.LogInfo("Primary node offline, marked instance as stopped")
5330
    else:
5331
      result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
5332
      msg = result.fail_msg
5333
      if msg:
5334
        self.proc.LogWarning("Could not shutdown instance: %s" % msg)
5335

    
5336
      _ShutdownInstanceDisks(self, instance)
5337

    
5338

    
5339
class LUInstanceReinstall(LogicalUnit):
5340
  """Reinstall an instance.
5341

5342
  """
5343
  HPATH = "instance-reinstall"
5344
  HTYPE = constants.HTYPE_INSTANCE
5345
  REQ_BGL = False
5346

    
5347
  def ExpandNames(self):
5348
    self._ExpandAndLockInstance()
5349

    
5350
  def BuildHooksEnv(self):
5351
    """Build hooks env.
5352

5353
    This runs on master, primary and secondary nodes of the instance.
5354

5355
    """
5356
    env = _BuildInstanceHookEnvByObject(self, self.instance)
5357
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5358
    return env, nl, nl
5359

    
5360
  def CheckPrereq(self):
5361
    """Check prerequisites.
5362

5363
    This checks that the instance is in the cluster and is not running.
5364

5365
    """
5366
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5367
    assert instance is not None, \
5368
      "Cannot retrieve locked instance %s" % self.op.instance_name
5369
    _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
5370
                     " offline, cannot reinstall")
5371
    for node in instance.secondary_nodes:
5372
      _CheckNodeOnline(self, node, "Instance secondary node offline,"
5373
                       " cannot reinstall")
5374

    
5375
    if instance.disk_template == constants.DT_DISKLESS:
5376
      raise errors.OpPrereqError("Instance '%s' has no disks" %
5377
                                 self.op.instance_name,
5378
                                 errors.ECODE_INVAL)
5379
    _CheckInstanceDown(self, instance, "cannot reinstall")
5380

    
5381
    if self.op.os_type is not None:
5382
      # OS verification
5383
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
5384
      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
5385
      instance_os = self.op.os_type
5386
    else:
5387
      instance_os = instance.os
5388

    
5389
    nodelist = list(instance.all_nodes)
5390

    
5391
    if self.op.osparams:
5392
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
5393
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
5394
      self.os_inst = i_osdict # the new dict (without defaults)
5395
    else:
5396
      self.os_inst = None
5397

    
5398
    self.instance = instance
5399

    
5400
  def Exec(self, feedback_fn):
5401
    """Reinstall the instance.
5402

5403
    """
5404
    inst = self.instance
5405

    
5406
    if self.op.os_type is not None:
5407
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
5408
      inst.os = self.op.os_type
5409
      # Write to configuration
5410
      self.cfg.Update(inst, feedback_fn)
5411

    
5412
    _StartInstanceDisks(self, inst, None)
5413
    try:
5414
      feedback_fn("Running the instance OS create scripts...")
5415
      # FIXME: pass debug option from opcode to backend
5416
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
5417
                                             self.op.debug_level,
5418
                                             osparams=self.os_inst)
5419
      result.Raise("Could not install OS for instance %s on node %s" %
5420
                   (inst.name, inst.primary_node))
5421
    finally:
5422
      _ShutdownInstanceDisks(self, inst)
5423

    
5424

    
5425
class LUInstanceRecreateDisks(LogicalUnit):
5426
  """Recreate an instance's missing disks.
5427

5428
  """
5429
  HPATH = "instance-recreate-disks"
5430
  HTYPE = constants.HTYPE_INSTANCE
5431
  REQ_BGL = False
5432

    
5433
  def ExpandNames(self):
5434
    self._ExpandAndLockInstance()
5435

    
5436
  def BuildHooksEnv(self):
5437
    """Build hooks env.
5438

5439
    This runs on master, primary and secondary nodes of the instance.
5440

5441
    """
5442
    env = _BuildInstanceHookEnvByObject(self, self.instance)
5443
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5444
    return env, nl, nl
5445

    
5446
  def CheckPrereq(self):
5447
    """Check prerequisites.
5448

5449
    This checks that the instance is in the cluster and is not running.
5450

5451
    """
5452
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5453
    assert instance is not None, \
5454
      "Cannot retrieve locked instance %s" % self.op.instance_name
5455
    _CheckNodeOnline(self, instance.primary_node)
5456

    
5457
    if instance.disk_template == constants.DT_DISKLESS:
5458
      raise errors.OpPrereqError("Instance '%s' has no disks" %
5459
                                 self.op.instance_name, errors.ECODE_INVAL)
5460
    _CheckInstanceDown(self, instance, "cannot recreate disks")
5461

    
5462
    if not self.op.disks:
5463
      self.op.disks = range(len(instance.disks))
5464
    else:
5465
      for idx in self.op.disks:
5466
        if idx >= len(instance.disks):
5467
          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
5468
                                     errors.ECODE_INVAL)
5469

    
5470
    self.instance = instance
5471

    
5472
  def Exec(self, feedback_fn):
5473
    """Recreate the disks.
5474

5475
    """
5476
    to_skip = []
5477
    for idx, _ in enumerate(self.instance.disks):
5478
      if idx not in self.op.disks: # disk idx has not been passed in
5479
        to_skip.append(idx)
5480
        continue
5481

    
5482
    _CreateDisks(self, self.instance, to_skip=to_skip)
5483

    
5484

    
5485
class LUInstanceRename(LogicalUnit):
5486
  """Rename an instance.
5487

5488
  """
5489
  HPATH = "instance-rename"
5490
  HTYPE = constants.HTYPE_INSTANCE
5491

    
5492
  def CheckArguments(self):
5493
    """Check arguments.
5494

5495
    """
5496
    if self.op.ip_check and not self.op.name_check:
5497
      # TODO: make the ip check more flexible and not depend on the name check
5498
      raise errors.OpPrereqError("Cannot do ip check without a name check",
5499
                                 errors.ECODE_INVAL)
5500

    
5501
  def BuildHooksEnv(self):
5502
    """Build hooks env.
5503

5504
    This runs on master, primary and secondary nodes of the instance.
5505

5506
    """
5507
    env = _BuildInstanceHookEnvByObject(self, self.instance)
5508
    env["INSTANCE_NEW_NAME"] = self.op.new_name
5509
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5510
    return env, nl, nl
5511

    
5512
  def CheckPrereq(self):
5513
    """Check prerequisites.
5514

5515
    This checks that the instance is in the cluster and is not running.
5516

5517
    """
5518
    self.op.instance_name = _ExpandInstanceName(self.cfg,
5519
                                                self.op.instance_name)
5520
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5521
    assert instance is not None
5522
    _CheckNodeOnline(self, instance.primary_node)
5523
    _CheckInstanceDown(self, instance, "cannot rename")
5524
    self.instance = instance
5525

    
5526
    new_name = self.op.new_name
5527
    if self.op.name_check:
5528
      hostname = netutils.GetHostname(name=new_name)
5529
      self.LogInfo("Resolved given name '%s' to '%s'", new_name,
5530
                   hostname.name)
5531
      new_name = self.op.new_name = hostname.name
5532
      if (self.op.ip_check and
5533
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
5534
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
5535
                                   (hostname.ip, new_name),
5536
                                   errors.ECODE_NOTUNIQUE)
5537

    
5538
    instance_list = self.cfg.GetInstanceList()
5539
    if new_name in instance_list and new_name != instance.name:
5540
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
5541
                                 new_name, errors.ECODE_EXISTS)
5542

    
5543
  def Exec(self, feedback_fn):
5544
    """Rename the instance.
5545

5546
    """
5547
    inst = self.instance
5548
    old_name = inst.name
5549

    
5550
    rename_file_storage = False
5551
    if (inst.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE) and
5552
        self.op.new_name != inst.name):
5553
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
5554
      rename_file_storage = True
5555

    
5556
    self.cfg.RenameInstance(inst.name, self.op.new_name)
5557
    # Change the instance lock. This is definitely safe while we hold the BGL
5558
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
5559
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
5560

    
5561
    # re-read the instance from the configuration after rename
5562
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
5563

    
5564
    if rename_file_storage:
5565
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
5566
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
5567
                                                     old_file_storage_dir,
5568
                                                     new_file_storage_dir)
5569
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
5570
                   " (but the instance has been renamed in Ganeti)" %
5571
                   (inst.primary_node, old_file_storage_dir,
5572
                    new_file_storage_dir))
5573

    
5574
    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)

    return inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return env, nl, nl_post

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)


def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
  """Utility function to remove an instance.

  """
  logging.info("Removing block devices for instance %s", instance.name)

  if not _RemoveDisks(lu, instance):
    if not ignore_failures:
      raise errors.OpExecError("Can't remove instance's disks")
    feedback_fn("Warning: can't remove instance's disks")

  logging.info("Removing instance %s out of cluster config", instance.name)

  lu.cfg.RemoveInstance(instance.name)

  assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
    "Instance lock removal conflict"

  # Remove lock for the instance
  lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name


class LUInstanceQuery(NoHooksLU):
  """Logical unit for querying instances.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
                             self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.iq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.iq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.iq.OldStyleQuery(self)


class LUInstanceFailover(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self.instance
    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      "OLD_PRIMARY": source_node,
      "OLD_SECONDARY": target_node,
      "NEW_PRIMARY": target_node,
      "NEW_SECONDARY": source_node,
      }
    env.update(_BuildInstanceHookEnvByObject(self, instance))
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    nl_post = list(nl)
    nl_post.append(source_node)
    return env, nl, nl_post

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.",
                                 errors.ECODE_STATE)

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance
    primary_node = self.cfg.GetNodeInfo(instance.primary_node)

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    if instance.admin_up:
      feedback_fn("* checking disk consistency between source and target")
      for dev in instance.disks:
        # for drbd, these are drbd over lvm
        if not _CheckDiskConsistency(self, dev, target_node, False):
          if not self.op.ignore_consistency:
            raise errors.OpExecError("Disk %s is degraded on target node,"
                                     " aborting failover." % dev.iv_name)
    else:
      feedback_fn("* not checking disk consistency as instance is not running")

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency or primary_node.offline:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

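    # Updating primary_node in the configuration is the actual failover;
    # everything below just brings the instance back up on its new primary.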
    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance, feedback_fn)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUInstanceMigrate(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self._migrater = TLMigrateInstance(self, self.op.instance_name,
                                       self.op.cleanup)
    self.tasklets = [self._migrater]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self._migrater.instance
    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]
    env = _BuildInstanceHookEnvByObject(self, instance)
    env["MIGRATE_LIVE"] = self._migrater.live
    env["MIGRATE_CLEANUP"] = self.op.cleanup
    env.update({
        "OLD_PRIMARY": source_node,
        "OLD_SECONDARY": target_node,
        "NEW_PRIMARY": target_node,
        "NEW_SECONDARY": source_node,
        })
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    nl_post = list(nl)
    nl_post.append(source_node)
    return env, nl, nl_post


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
                                       self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    _CheckNodeVmCapable(self, target_node)

    if instance.admin_up:
      # check memory requirements on the target node
      _CheckNodeFreeMemory(self, target_node, "moving instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the target node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      _CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
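    # Each disk is first assembled on the target node to obtain its device
    # path; the source node then streams the raw disk contents directly to
    # that path.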
    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, disk,
                                               instance.name, True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node, disk,
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    instance.primary_node = target_node
    self.cfg.Update(instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    _RemoveDisks(self, instance, target_node=source_node)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUNodeMigrate(LogicalUnit):
  """Migrate all instances from a node.

  """
  HPATH = "node-migrate"
  HTYPE = constants.HTYPE_NODE
  REQ_BGL = False

  def ExpandNames(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

    # Create tasklets for migrating instances for all instances on this node
    names = []
    tasklets = []

    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
      logging.debug("Migrating instance %s", inst.name)
      names.append(inst.name)

      tasklets.append(TLMigrateInstance(self, inst.name, False))

    self.tasklets = tasklets

    # Declare instance locks
    self.needed_locks[locking.LEVEL_INSTANCE] = names

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "NODE_NAME": self.op.node_name,
      }

    nl = [self.cfg.GetMasterNode()]

    return (env, nl, nl)


class TLMigrateInstance(Tasklet):
  """Tasklet class for instance migration.

  @type live: boolean
  @ivar live: whether the migration will be done live or non-live;
      this variable is initialized only after CheckPrereq has run

  """
  def __init__(self, lu, instance_name, cleanup):
    """Initializes this class.

    """
    Tasklet.__init__(self, lu)

    # Parameters
    self.instance_name = instance_name
    self.cleanup = cleanup
    self.live = False # will be overridden later

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
    instance = self.cfg.GetInstanceInfo(instance_name)
    assert instance is not None

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.", errors.ECODE_STATE)

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ConfigurationError("No secondary node but using"
                                      " drbd8 disk template")

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    _CheckInstanceBridgesExist(self.lu, instance, node=target_node)

    if not self.cleanup:
      _CheckNodeNotDrained(self.lu, target_node)
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      result.Raise("Can't migrate, please use failover",
                   prereq=True, ecode=errors.ECODE_STATE)

    self.instance = instance

    if self.lu.op.live is not None and self.lu.op.mode is not None:
      raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
                                 " parameters are accepted",
                                 errors.ECODE_INVAL)
    if self.lu.op.live is not None:
      if self.lu.op.live:
        self.lu.op.mode = constants.HT_MIGRATION_LIVE
      else:
        self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
      # reset the 'live' parameter to None so that repeated
      # invocations of CheckPrereq do not raise an exception
      self.lu.op.live = None
    elif self.lu.op.mode is None:
      # read the default value from the hypervisor
      i_hv = self.cfg.GetClusterInfo().FillHV(instance, skip_globals=False)
      self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]

    self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        nres.Raise("Cannot resync disks on node %s" % node)
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    result.Raise("Cannot change disk to secondary on node %s" % node)

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      nres.Raise("Cannot disconnect disks on node %s" % node)

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      nres.Raise("Cannot change disks config on node %s" % node)

  def _ExecCleanup(self):
    """Try to clean up after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise("Can't contact node %s" % node)

    runningon_source = instance.name in ins_l[source_node].payload
    runningon_target = instance.name in ins_l[target_node].payload

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance, self.feedback_fn)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

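    # Whichever node the instance is not running on gets demoted to
    # secondary; the disks are then cycled through standalone mode back into
    # a clean single-master pair.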
    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore errors here, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.lu.LogWarning("Migration failed and I can't reconnect the"
                         " drives: error '%s'\n"
                         "Please look and recover the instance status" %
                         str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.fail_msg
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s",
                    target_node, abort_msg)
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self.lu, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.fail_msg
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.fail_msg
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Pre-migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.live)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance, self.feedback_fn)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s", msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def Exec(self, feedback_fn):
    """Perform the migration.

    """
    feedback_fn("Migrating instance %s" % self.instance.name)

    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node
    self.target_node = self.instance.secondary_nodes[0]
    self.all_nodes = [self.source_node, self.target_node]
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }

    if self.cleanup:
      return self._ExecCleanup()
    else:
      return self._ExecMigration()


def _CreateBlockDev(lu, node, instance, device, force_create,
                    info, force_open):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device whose
      CreateOnSecondary() method returns True
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as an LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  if device.CreateOnSecondary():
    force_create = True

  if device.children:
    for child in device.children:
      _CreateBlockDev(lu, node, instance, child, force_create,
                      info, force_open)

  if not force_create:
    return

  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)


def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created in advance.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as an LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  lu.cfg.SetDiskID(device, node)
  result = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, force_open, info)
  result.Raise("Can't create block device %s on"
               " node %s for instance %s" % (device, node, instance.name))
  if device.physical_id is None:
    device.physical_id = result.payload


def _GenerateUniqueNames(lu, exts):
  """Generate a suitable LV name.

  This will generate a logical volume name for the given instance.

  """
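  # Example: for exts [".disk0", ".disk1"] this returns names such as
  # ["<unique-id>.disk0", "<unique-id>.disk1"], with the unique id coming
  # from the configuration's ID generator.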
  results = []
  for val in exts:
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
    results.append("%s%s" % (new_id, val))
  return results


def _GenerateDRBD8Branch(lu, primary, secondary, size, vgname, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  port = lu.cfg.AllocatePort()
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
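  # Each DRBD8 disk is backed by two logical volumes on each of its two
  # nodes: one holding the actual data and a fixed 128 MB volume for the
  # DRBD metadata.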
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                          logical_id=(primary, secondary, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name)
  return drbd_dev


def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index, feedback_fn):
  """Generate the entire disk layout for a given template type.

  """
  # TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      vg = disk.get("vg", vgname)
      feedback_fn("* disk %i, vg %s, name %s" % (idx, vg, names[idx]))
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vg, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
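    # The minors list interleaves the two nodes: disk idx uses
    # minors[idx * 2] on the primary node and minors[idx * 2 + 1] on the
    # secondary node.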
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      vg = disk.get("vg", vgname)
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], vg, names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    opcodes.RequireFileStorage()

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         disk_index)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_SHARED_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    opcodes.RequireSharedFileStorage()

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         disk_index)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks


def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name


def _CalcEta(time_taken, written, total_size):
  """Calculates the ETA based on size written and total size.

  @param time_taken: The time taken so far
  @param written: amount written so far
  @param total_size: The total size of data to be written
  @return: The remaining time in seconds

  """
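  # Example: 256 MB written in 2.0 seconds out of 1024 MB total gives
  # avg_time = 2.0 / 256 s/MB and an ETA of (1024 - 256) * 2.0 / 256 = 6 s.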
  avg_time = time_taken / float(written)
  return (total_size - written) * avg_time


def _WipeDisks(lu, instance):
  """Wipes instance disks.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should wipe
  @return: the success of the wipe

  """
  node = instance.primary_node
  logging.info("Pause sync of instance %s disks", instance.name)
  result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)

  for idx, success in enumerate(result.payload):
    if not success:
      logging.warn("pause-sync of instance %s for disks %d failed",
                   instance.name, idx)

  try:
    for idx, device in enumerate(instance.disks):
      lu.LogInfo("* Wiping disk %d", idx)
      logging.info("Wiping disk %d for instance %s", idx, instance.name)

      # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
      # MAX_WIPE_CHUNK at max
      wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
                            constants.MIN_WIPE_CHUNK_PERCENT)

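      # Example (assuming the usual constants of MIN_WIPE_CHUNK_PERCENT = 10
      # and MAX_WIPE_CHUNK = 1024 MB): a 2048 MB disk is wiped in 204.8 MB
      # chunks, while anything of 10 GB or more is capped at 1024 MB chunks.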
      offset = 0
      size = device.size
      last_output = 0
      start_time = time.time()

      while offset < size:
        wipe_size = min(wipe_chunk_size, size - offset)
        result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
        result.Raise("Could not wipe disk %d at offset %d for size %d" %
                     (idx, offset, wipe_size))
        now = time.time()
        offset += wipe_size
        if now - last_output >= 60:
          eta = _CalcEta(now - start_time, offset, size)
          lu.LogInfo(" - done: %.1f%% ETA: %s" %
                     (offset / float(size) * 100, utils.FormatSeconds(eta)))
          last_output = now
  finally:
    logging.info("Resume sync of instance %s disks", instance.name)

    result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, False)

    for idx, success in enumerate(result.payload):
      if not success:
        lu.LogWarning("Warning: Resume sync of disk %d failed. Please have a"
                      " look at the status and troubleshoot the issue.", idx)
        logging.warn("resume-sync of instance %s for disks %d failed",
                     instance.name, idx)


def _CreateDisks(lu, instance, to_skip=None, target_node=None):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @type to_skip: list
  @param to_skip: list of indices to skip
  @type target_node: string
  @param target_node: if passed, overrides the target node for creation
  @rtype: boolean
  @return: the success of the creation

  """
  info = _GetInstanceInfoText(instance)
  if target_node is None:
    pnode = instance.primary_node
    all_nodes = instance.all_nodes
  else:
    pnode = target_node
    all_nodes = [pnode]

  if instance.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    result.Raise("Failed to create directory '%s' on"
                 " node %s" % (file_storage_dir, pnode))

  # Note: this needs to be kept in sync with adding of disks in
  # LUInstanceSetParams
  for idx, device in enumerate(instance.disks):
    if to_skip and idx in to_skip:
      continue
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    for node in all_nodes:
      f_create = node == pnode
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)


def _RemoveDisks(lu, instance, target_node=None):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type target_node: string
  @param target_node: used to override the node on which to remove the disks
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  for device in instance.disks:
    if target_node:
      edata = [(target_node, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node, disk in edata:
      lu.cfg.SetDiskID(disk, node)
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
      if msg:
        lu.LogWarning("Could not remove block device %s on node %s,"
                      " continuing anyway: %s", device.iv_name, node, msg)
        all_result = False

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if target_node:
      tgt = target_node
    else:
      tgt = instance.primary_node
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, tgt, result.fail_msg)
      all_result = False

  return all_result


def _ComputeDiskSizePerVG(disk_template, disks):
  """Compute disk size requirements in the volume group.

  """
  def _compute(disks, payload):
    """Universal algorithm.

    """
    vgs = {}
    for disk in disks:
      vgs[disk["vg"]] = vgs.get(disk["vg"], 0) + disk["size"] + payload

    return vgs

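  # Example: two DRBD disks, {"vg": "xenvg", "size": 1024} and
  # {"vg": "xenvg", "size": 256}, need 1024 + 128 + 256 + 128 = 1536 MB of
  # free space in volume group "xenvg".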
  # Required free disk space as a function of disk and swap space
6852
  req_size_dict = {
6853
    constants.DT_DISKLESS: {},
6854
    constants.DT_PLAIN: _compute(disks, 0),
6855
    # 128 MB are added for drbd metadata for each disk
6856
    constants.DT_DRBD8: _compute(disks, 128),
6857
    constants.DT_FILE: {},
6858
    constants.DT_SHARED_FILE: {},
6859
  }
6860

    
6861
  if disk_template not in req_size_dict:
6862
    raise errors.ProgrammerError("Disk template '%s' size requirement"
6863
                                 " is unknown" %  disk_template)
6864

    
6865
  return req_size_dict[disk_template]
6866

    
6867

    
6868
def _ComputeDiskSize(disk_template, disks):
6869
  """Compute disk size requirements in the volume group
6870

6871
  """
6872
  # Required free disk space as a function of disk and swap space
6873
  req_size_dict = {
6874
    constants.DT_DISKLESS: None,
6875
    constants.DT_PLAIN: sum(d["size"] for d in disks),
6876
    # 128 MB are added for drbd metadata for each disk
6877
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
6878
    constants.DT_FILE: None,
6879
    constants.DT_SHARED_FILE: 0,
6880
  }
6881

    
6882
  if disk_template not in req_size_dict:
6883
    raise errors.ProgrammerError("Disk template '%s' size requirement"
6884
                                 " is unknown" %  disk_template)
6885

    
6886
  return req_size_dict[disk_template]
6887

    
6888

    
6889
def _FilterVmNodes(lu, nodenames):
6890
  """Filters out non-vm_capable nodes from a list.
6891

6892
  @type lu: L{LogicalUnit}
6893
  @param lu: the logical unit for which we check
6894
  @type nodenames: list
6895
  @param nodenames: the list of nodes on which we should check
6896
  @rtype: list
6897
  @return: the list of vm-capable nodes
6898

6899
  """
6900
  vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
6901
  return [name for name in nodenames if name not in vm_nodes]
6902

    
6903

    
6904
def _CheckHVParams(lu, nodenames, hvname, hvparams):
6905
  """Hypervisor parameter validation.
6906

6907
  This function abstract the hypervisor parameter validation to be
6908
  used in both instance create and instance modify.
6909

6910
  @type lu: L{LogicalUnit}
6911
  @param lu: the logical unit for which we check
6912
  @type nodenames: list
6913
  @param nodenames: the list of nodes on which we should check
6914
  @type hvname: string
6915
  @param hvname: the name of the hypervisor we should use
6916
  @type hvparams: dict
6917
  @param hvparams: the parameters which we need to check
6918
  @raise errors.OpPrereqError: if the parameters are not valid
6919

6920
  """
6921
  nodenames = _FilterVmNodes(lu, nodenames)
6922
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
6923
                                                  hvname,
6924
                                                  hvparams)
6925
  for node in nodenames:
6926
    info = hvinfo[node]
6927
    if info.offline:
6928
      continue
6929
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
6930

    
6931

    
6932
def _CheckOSParams(lu, required, nodenames, osname, osparams):
6933
  """OS parameters validation.
6934

6935
  @type lu: L{LogicalUnit}
6936
  @param lu: the logical unit for which we check
6937
  @type required: boolean
6938
  @param required: whether the validation should fail if the OS is not
6939
      found
6940
  @type nodenames: list
6941
  @param nodenames: the list of nodes on which we should check
6942
  @type osname: string
6943
  @param osname: the name of the hypervisor we should use
6944
  @type osparams: dict
6945
  @param osparams: the parameters which we need to check
6946
  @raise errors.OpPrereqError: if the parameters are not valid
6947

6948
  """
6949
  nodenames = _FilterVmNodes(lu, nodenames)
6950
  result = lu.rpc.call_os_validate(required, nodenames, osname,
6951
                                   [constants.OS_VALIDATE_PARAMETERS],
6952
                                   osparams)
6953
  for node, nres in result.items():
6954
    # we don't check for offline cases since this should be run only
6955
    # against the master node and/or an instance's nodes
6956
    nres.Raise("OS Parameters validation failed on node %s" % node)
6957
    if not nres.payload:
6958
      lu.LogInfo("OS %s not found on node %s, validation skipped",
6959
                 osname, node)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do ip check without a name check",
                                 errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)

    # check disks: parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if "adopt" in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        self.op.file_driver not in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute",
                                 errors.ECODE_INVAL)

    ### Node/iallocator related checks
    _CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_NET_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    self._cds = _GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
          netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

    
7118
  def ExpandNames(self):
7119
    """ExpandNames for CreateInstance.
7120

7121
    Figure out the right locks for instance creation.
7122

7123
    """
7124
    self.needed_locks = {}
7125

    
7126
    instance_name = self.op.instance_name
7127
    # this is just a preventive check, but someone might still add this
7128
    # instance in the meantime, and creation will fail at lock-add time
7129
    if instance_name in self.cfg.GetInstanceList():
7130
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
7131
                                 instance_name, errors.ECODE_EXISTS)
7132

    
7133
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
7134

    
7135
    if self.op.iallocator:
7136
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7137
    else:
7138
      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
7139
      nodelist = [self.op.pnode]
7140
      if self.op.snode is not None:
7141
        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
7142
        nodelist.append(self.op.snode)
7143
      self.needed_locks[locking.LEVEL_NODE] = nodelist
7144

    
7145
    # in case of import lock the source node too
7146
    if self.op.mode == constants.INSTANCE_IMPORT:
7147
      src_node = self.op.src_node
7148
      src_path = self.op.src_path
7149

    
7150
      if src_path is None:
7151
        self.op.src_path = src_path = self.op.instance_name
7152

    
7153
      if src_node is None:
7154
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7155
        self.op.src_node = None
7156
        if os.path.isabs(src_path):
7157
          raise errors.OpPrereqError("Importing an instance from an absolute"
7158
                                     " path requires a source node option.",
7159
                                     errors.ECODE_INVAL)
7160
      else:
7161
        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
7162
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
7163
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
7164
        if not os.path.isabs(src_path):
7165
          self.op.src_path = src_path = \
7166
            utils.PathJoin(constants.EXPORT_DIR, src_path)
7167

    
7168
  def _RunAllocator(self):
7169
    """Run the allocator based on input opcode.
7170

7171
    """
7172
    nics = [n.ToDict() for n in self.nics]
7173
    ial = IAllocator(self.cfg, self.rpc,
7174
                     mode=constants.IALLOCATOR_MODE_ALLOC,
7175
                     name=self.op.instance_name,
7176
                     disk_template=self.op.disk_template,
7177
                     tags=[],
7178
                     os=self.op.os_type,
7179
                     vcpus=self.be_full[constants.BE_VCPUS],
7180
                     mem_size=self.be_full[constants.BE_MEMORY],
7181
                     disks=self.disks,
7182
                     nics=nics,
7183
                     hypervisor=self.op.hypervisor,
7184
                     )
7185

    
7186
    ial.Run(self.op.iallocator)
7187

    
7188
    if not ial.success:
7189
      raise errors.OpPrereqError("Can't compute nodes using"
7190
                                 " iallocator '%s': %s" %
7191
                                 (self.op.iallocator, ial.info),
7192
                                 errors.ECODE_NORES)
7193
    if len(ial.result) != ial.required_nodes:
7194
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
7195
                                 " of nodes (%s), required %s" %
7196
                                 (self.op.iallocator, len(ial.result),
7197
                                  ial.required_nodes), errors.ECODE_FAULT)
7198
    self.op.pnode = ial.result[0]
7199
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
7200
                 self.op.instance_name, self.op.iallocator,
7201
                 utils.CommaJoin(ial.result))
7202
    if ial.required_nodes == 2:
7203
      self.op.snode = ial.result[1]
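
  # For IALLOCATOR_MODE_ALLOC the plugin returns a list of node names:
  # as consumed above, ial.result[0] becomes the primary node and, for
  # mirrored disk templates (required_nodes == 2), ial.result[1] the
  # secondary.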

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(_BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=_NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d["size"], d["mode"]) for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
    ))

    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    src_node = self.op.src_node
    src_path = self.op.src_path

    if src_node is None:
      locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if src_path in exp_list[node].payload:
          found = True
          self.op.src_node = src_node = node
          self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
                                                       src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   src_path, errors.ECODE_INVAL)

    _CheckNodeOnline(self, src_node)
    result = self.rpc.call_export_info(src_node, src_path)
    result.Raise("No export or invalid export found in dir %s" % src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      if einfo.has_option(constants.INISECT_INS, "disk_count"):
        disks = []
        # TODO: import the disk iv_name too
        for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({"size": disk_sz})
        self.op.disks = disks
      else:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if (not self.op.nics and
        einfo.has_option(constants.INISECT_INS, "nic_count")):
      nics = []
      for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
        ndict = {}
        for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
          v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
          ndict[name] = v
        nics.append(ndict)
      self.op.nics = nics

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value
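
  # Sketch of an export file as parsed above (the option names are the
  # ones read by this method; section titles and values are purely
  # illustrative, the real titles come from constants.INISECT_*):
  #   [export]
  #   version = 0
  #   os = debootstrap
  #
  #   [instance]
  #   disk_template = plain
  #   disk_count = 1
  #   disk0_size = 10240
  #   nic_count = 1
  #   nic0_mac = aa:00:00:fa:3a:3f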

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    _CheckGlobalHvParams(self.op.hvparams)

    # fill and remember the beparams dict
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
    self.be_full = cluster.SimpleFillBE(self.op.beparams)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = []
    for idx, nic in enumerate(self.op.nics):
      nic_mode_req = nic.get("mode", None)
      nic_mode = nic_mode_req
      if nic_mode is None:
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

      # in routed mode, for the first nic, the default ip is 'auto'
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
        default_ip_mode = constants.VALUE_AUTO
      else:
        default_ip_mode = constants.VALUE_NONE

      # ip validity checks
      ip = nic.get("ip", default_ip_mode)
      if ip is None or ip.lower() == constants.VALUE_NONE:
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        if not self.op.name_check:
          raise errors.OpPrereqError("IP address set to auto but name checks"
                                     " have been skipped",
                                     errors.ECODE_INVAL)
        nic_ip = self.hostname1.ip
      else:
        if not netutils.IPAddress.IsValid(ip):
          raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                     errors.ECODE_INVAL)
        nic_ip = ip

      # TODO: check the ip address for uniqueness
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
        raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                   errors.ECODE_INVAL)

      # MAC address verification
      mac = nic.get("mac", constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        mac = utils.NormalizeAndValidateMac(mac)

        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address %s already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)

      # bridge verification
      bridge = nic.get("bridge", None)
      link = nic.get("link", None)
      if bridge and link:
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
                                   " at the same time", errors.ECODE_INVAL)
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
                                   errors.ECODE_INVAL)
      elif bridge:
        link = bridge

      nicparams = {}
      if nic_mode_req:
        nicparams[constants.NIC_MODE] = nic_mode_req
      if link:
        nicparams[constants.NIC_LINK] = link

      check_params = cluster.SimpleFillNIC(nicparams)
      objects.NIC.CheckParameterSyntax(check_params)
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))

    # disk checks/pre-build
    self.disks = []
    for disk in self.op.disks:
      mode = disk.get("mode", constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                   mode, errors.ECODE_INVAL)
      size = disk.get("size", None)
      if size is None:
        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
      try:
        size = int(size)
      except (TypeError, ValueError):
        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
                                   errors.ECODE_INVAL)
      vg = disk.get("vg", self.cfg.GetVGName())
      new_disk = {"size": size, "mode": mode, "vg": vg}
      if "adopt" in disk:
        new_disk["adopt"] = disk["adopt"]
      self.disks.append(new_disk)

    if self.op.mode == constants.INSTANCE_IMPORT:

      # Check that the new instance doesn't have fewer disks than the export
      instance_disks = len(self.disks)
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (instance_disks, export_disks),
                                   errors.ECODE_INVAL)

      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      old_name = export_info.get(constants.INISECT_INS, 'name')
      try:
        exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
      except (TypeError, ValueError), err:
        raise errors.OpPrereqError("Invalid export file, nic_count is not"
                                   " an integer: %s" % str(err),
                                   errors.ECODE_STATE)
      if self.op.instance_name == old_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO and exp_nic_count > idx:
            nic_mac_ini = 'nic%d_mac' % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node.", errors.ECODE_INVAL)
      _CheckNodeOnline(self, self.op.snode)
      _CheckNodeNotDrained(self, self.op.snode)
      _CheckNodeVmCapable(self, self.op.snode)
      self.secondaries.append(self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    if not self.adopt_disks:
      # Check lv size requirements, if not adopting
      req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
      _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)

    else: # instead, we must check the adoption data
      all_lvs = set([i["vg"] + "/" + i["adopt"] for i in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv"; need to ensure that other calls
          # to ReserveLV use the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.name],
                                       vg_names.payload.keys())[pnode.name]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk["size"] = int(float(node_lvs[dsk["vg"] + "/" + dsk["adopt"]][0]))

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)

    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)

    self.dry_run_result = list(nodenames)
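
  # Example NIC specification handled by the "NIC buildup" loop above
  # (values are illustrative):
  #   {"mode": constants.NIC_MODE_BRIDGED, "link": "xen-br0",
  #    "mac": constants.VALUE_AUTO, "ip": None}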

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
      # this is needed because os.path.join does not accept None arguments
      if self.op.file_storage_dir is None:
        string_file_storage_dir = ""
      else:
        string_file_storage_dir = self.op.file_storage_dir

      # build the full file storage dir path
      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      file_storage_dir = utils.PathJoin(get_fsd_fn(),
                                        string_file_storage_dir, instance)
    else:
      file_storage_dir = ""

    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries,
                                  self.disks,
                                  file_storage_dir,
                                  self.op.file_driver,
                                  0,
                                  feedback_fn)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            admin_up=False,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.adopt_disks:
      # rename LVs to the newly-generated names; we need to construct
      # 'fake' LV disks with the old data, plus the new unique_id
      tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
      rename_to = []
      for t_dsk, a_dsk in zip(tmp_disks, self.disks):
        rename_to.append(t_dsk.logical_id)
        t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
        self.cfg.SetDiskID(t_dsk, pnode_name)
      result = self.rpc.call_blockdev_rename(pnode_name,
                                             zip(tmp_disks, rename_to))
      result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        _CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed, reverting...")
        try:
          _RemoveDisks(self, iobj)
        finally:
          self.cfg.ReleaseDRBDMinors(instance)
          raise

      if self.cfg.GetClusterInfo().prealloc_wipe_disks:
        feedback_fn("* wiping instance disks...")
        try:
          _WipeDisks(self, iobj)
        except errors.OpExecError:
          self.LogWarning("Device wiping failed, reverting...")
          try:
            _RemoveDisks(self, iobj)
          finally:
            self.cfg.ReleaseDRBDMinors(instance)
            raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Unlock all the nodes
    if self.op.mode == constants.INSTANCE_IMPORT:
      nodes_keep = [self.op.src_node]
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
                       if node != self.op.src_node]
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
    else:
      self.context.glm.release(locking.LEVEL_NODE)
      del self.acquired_locks[locking.LEVEL_NODE]

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
                                                 self.op.debug_level)
          result.Raise("Could not add os for instance %s"
                       " on node %s" % (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")

        transfers = []

        for idx, image in enumerate(self.src_images):
          if not image:
            continue

          # FIXME: pass debug option from opcode to backend
          dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                             constants.IEIO_FILE, (image, ),
                                             constants.IEIO_SCRIPT,
                                             (iobj.disks[idx], idx),
                                             None)
          transfers.append(dt)

        import_result = \
          masterd.instance.TransferInstanceData(self, feedback_fn,
                                                self.op.src_node, pnode_name,
                                                self.pnode.secondary_ip,
                                                iobj, transfers)
        if not compat.all(import_result):
          self.LogWarning("Some disks for instance %s on node %s were not"
                          " imported successfully" % (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
        feedback_fn("* preparing remote import...")
        # The source cluster will stop the instance before attempting to make a
        # connection. In some cases stopping an instance can take a long time,
        # hence the shutdown timeout is added to the connection timeout.
        connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                           self.op.source_shutdown_timeout)
        timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

        assert iobj.primary_node == self.pnode.name
        disk_results = \
          masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                        self.source_x509_ca,
                                        self._cds, timeouts)
        if not compat.all(disk_results):
          # TODO: Should the instance still be started, even if some disks
          # failed to import (valid for local imports, too)?
          self.LogWarning("Some disks for instance %s on node %s were not"
                          " imported successfully" % (instance, pnode_name))

        # Run rename script on newly imported instance
        assert iobj.name == instance
        feedback_fn("Running rename script for %s" % instance)
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
                                                   self.source_instance_name,
                                                   self.op.debug_level)
        if result.fail_msg:
          self.LogWarning("Failed to run rename script for %s on node"
                          " %s: %s" % (instance, pnode_name, result.fail_msg))

      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      iobj.admin_up = True
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance.

    """
    instance = self.instance
    node = instance.primary_node

    node_insts = self.rpc.call_instance_list([node],
                                             [instance.hypervisor])[node]
    node_insts.Raise("Can't get node information from %s" % node)

    if instance.name not in node_insts.payload:
      if instance.admin_up:
        state = constants.INSTST_ERRORDOWN
      else:
        state = constants.INSTST_ADMINDOWN
      raise errors.OpExecError("Instance %s is not running (state %s)" %
                               (instance.name, state))

    logging.debug("Connecting to console of %s on %s", instance.name, node)

    return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)


def _GetInstanceConsole(cluster, instance):
  """Returns console information for an instance.

  @type cluster: L{objects.Cluster}
  @type instance: L{objects.Instance}
  @rtype: dict

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  # beparams and hvparams are passed separately, to avoid editing the
  # instance and then saving the defaults in the instance itself.
  hvparams = cluster.FillHV(instance)
  beparams = cluster.FillBE(instance)
  console = hyper.GetInstanceConsole(instance, hvparams, beparams)

  assert console.instance == instance.name
  assert console.Validate()

  return console.ToDict()
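
# The dictionary returned above is a serialized objects.InstanceConsole;
# an illustrative (not authoritative) result for an SSH-based console:
#   {"instance": "inst1.example.com", "kind": constants.CONS_SSH,
#    "host": "node1.example.com", "user": "root", "command": [...]}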


class LUInstanceReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
                                  self.op.iallocator)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if self.op.iallocator is not None:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

    elif self.op.remote_node is not None:
      remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
      self.op.remote_node = remote_node

      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
                                   self.op.iallocator, self.op.remote_node,
                                   self.op.disks, False, self.op.early_release)

    self.tasklets = [self.replacer]

  def DeclareLocks(self, level):
    # If we're not already locking all nodes in the set we have to declare the
    # instance's primary/secondary nodes.
    if (level == locking.LEVEL_NODE and
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    instance = self.replacer.instance
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self, instance))
    nl = [
      self.cfg.GetMasterNode(),
      instance.primary_node,
      ]
    if self.op.remote_node is not None:
      nl.append(self.op.remote_node)
    return env, nl, nl


class TLReplaceDisks(Tasklet):
  """Replaces disks for an instance.

  Note: Locking is not within the scope of this class.

  """
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
               disks, delay_iallocator, early_release):
    """Initializes this class.

    """
    Tasklet.__init__(self, lu)

    # Parameters
    self.instance_name = instance_name
    self.mode = mode
    self.iallocator_name = iallocator_name
    self.remote_node = remote_node
    self.disks = disks
    self.delay_iallocator = delay_iallocator
    self.early_release = early_release

    # Runtime data
    self.instance = None
    self.new_node = None
    self.target_node = None
    self.other_node = None
    self.remote_node_info = None
    self.node_secondary_ip = None

  @staticmethod
  def CheckArguments(mode, remote_node, iallocator):
    """Helper function for users of this class.

    """
    # check for valid parameter combination
    if mode == constants.REPLACE_DISK_CHG:
      if remote_node is None and iallocator is None:
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given", errors.ECODE_INVAL)

      if remote_node is not None and iallocator is not None:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both", errors.ECODE_INVAL)

    elif remote_node is not None or iallocator is not None:
      # Not replacing the secondary
      raise errors.OpPrereqError("The iallocator and new node options can"
                                 " only be used when changing the"
                                 " secondary node", errors.ECODE_INVAL)

  @staticmethod
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
    """Compute a new secondary node using an IAllocator.

    """
    ial = IAllocator(lu.cfg, lu.rpc,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=instance_name,
                     relocate_from=relocate_from)

    ial.Run(iallocator_name)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
                                 " %s" % (iallocator_name, ial.info),
                                 errors.ECODE_NORES)

    if len(ial.result) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (iallocator_name,
                                  len(ial.result), ial.required_nodes),
                                 errors.ECODE_FAULT)

    remote_node_name = ial.result[0]

    lu.LogInfo("Selected new secondary for instance '%s': %s",
               instance_name, remote_node_name)

    return remote_node_name

  def _FindFaultyDisks(self, node_name):
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
                                    node_name, True)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.instance_name

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                 " instances", errors.ECODE_INVAL)

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes),
                                 errors.ECODE_FAULT)

    if not self.delay_iallocator:
      self._CheckPrereq2()

  def _CheckPrereq2(self):
    """Check prerequisites, second part.

    This function should always be part of CheckPrereq. It was separated and
    is now called from Exec because, during node evacuation, the iallocator
    was otherwise only called with an unmodified cluster model, not taking
    planned changes into account.

    """
    instance = self.instance
    secondary_node = instance.secondary_nodes[0]

    if self.iallocator_name is None:
      remote_node = self.remote_node
    else:
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
                                       instance.name, instance.secondary_nodes)

    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None

    if remote_node == self.instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.", errors.ECODE_INVAL)

    if remote_node == secondary_node:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance.",
                                 errors.ECODE_INVAL)

    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
                                    constants.REPLACE_DISK_CHG):
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
                                 errors.ECODE_INVAL)

    if self.mode == constants.REPLACE_DISK_AUTO:
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
      faulty_secondary = self._FindFaultyDisks(secondary_node)

      if faulty_primary and faulty_secondary:
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
                                   " one node and can not be repaired"
                                   " automatically" % self.instance_name,
                                   errors.ECODE_STATE)

      if faulty_primary:
        self.disks = faulty_primary
        self.target_node = instance.primary_node
        self.other_node = secondary_node
        check_nodes = [self.target_node, self.other_node]
      elif faulty_secondary:
        self.disks = faulty_secondary
        self.target_node = secondary_node
        self.other_node = instance.primary_node
        check_nodes = [self.target_node, self.other_node]
      else:
        self.disks = []
        check_nodes = []

    else:
      # Non-automatic modes
      if self.mode == constants.REPLACE_DISK_PRI:
        self.target_node = instance.primary_node
        self.other_node = secondary_node
        check_nodes = [self.target_node, self.other_node]

      elif self.mode == constants.REPLACE_DISK_SEC:
        self.target_node = secondary_node
        self.other_node = instance.primary_node
        check_nodes = [self.target_node, self.other_node]

      elif self.mode == constants.REPLACE_DISK_CHG:
        self.new_node = remote_node
        self.other_node = instance.primary_node
        self.target_node = secondary_node
        check_nodes = [self.new_node, self.other_node]

        _CheckNodeNotDrained(self.lu, remote_node)
        _CheckNodeVmCapable(self.lu, remote_node)

        old_node_info = self.cfg.GetNodeInfo(secondary_node)
        assert old_node_info is not None
        if old_node_info.offline and not self.early_release:
          # doesn't make sense to delay the release
          self.early_release = True
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
                          " early-release mode", secondary_node)

      else:
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
                                     self.mode)

      # If not specified all disks should be replaced
      if not self.disks:
        self.disks = range(len(self.instance.disks))

    for node in check_nodes:
      _CheckNodeOnline(self.lu, node)

    # Check whether disks are valid
    for disk_idx in self.disks:
      instance.FindDisk(disk_idx)

    # Get secondary node IP addresses
    node_2nd_ip = {}

    for node_name in [self.target_node, self.other_node, self.new_node]:
      if node_name is not None:
        node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip

    self.node_secondary_ip = node_2nd_ip
8239

    
8240
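    # Illustrative summary (comment only, hypothetical node names): for an
    # instance with primary "node1" and secondary "node2" the mode handling
    # above assigns the roles as follows:
    #   REPLACE_DISK_PRI -> target_node="node1", other_node="node2"
    #   REPLACE_DISK_SEC -> target_node="node2", other_node="node1"
    #   REPLACE_DISK_CHG -> target_node="node2", other_node="node1",
    #                       new_node=<the requested remote node>
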
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    if self.delay_iallocator:
      self._CheckPrereq2()

    if not self.disks:
      feedback_fn("No disks need replacement")
      return

    feedback_fn("Replacing disk(s) %s for %s" %
                (utils.CommaJoin(self.disks), self.instance.name))

    activate_disks = (not self.instance.admin_up)

    # Activate the instance disks if we're replacing them on a down instance
    if activate_disks:
      _StartInstanceDisks(self.lu, self.instance, True)

    try:
      # Should we replace the secondary node?
      if self.new_node is not None:
        fn = self._ExecDrbd8Secondary
      else:
        fn = self._ExecDrbd8DiskOnly

      return fn(feedback_fn)

    finally:
      # Deactivate the instance disks if we're replacing them on a
      # down instance
      if activate_disks:
        _SafeShutdownInstanceDisks(self.lu, self.instance)

  def _CheckVolumeGroup(self, nodes):
    self.lu.LogInfo("Checking volume groups")

    vgname = self.cfg.GetVGName()

    # Make sure volume group exists on all involved nodes
    results = self.rpc.call_vg_list(nodes)
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")

    for node in nodes:
      res = results[node]
      res.Raise("Error checking node %s" % node)
      if vgname not in res.payload:
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
                                 (vgname, node))

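  # Illustrative only (hypothetical node/VG names): call_vg_list returns a
  # per-node result whose payload is assumed to map volume group names to
  # their sizes, so the membership check above amounts to:
  #   results["node1"].payload -> {"xenvg": 409600}
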
  def _CheckDisksExistence(self, nodes):
    # Check disk existence
    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      for node in nodes:
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
        self.cfg.SetDiskID(dev, node)

        result = self.rpc.call_blockdev_find(node, dev)

        msg = result.fail_msg
        if msg or not result.payload:
          if not msg:
            msg = "disk not found"
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                   (idx, node, msg))

  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
                      (idx, node_name))

      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
                                   ldisk=ldisk):
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
                                 " replace disks for instance %s" %
                                 (node_name, self.instance.name))

  def _CreateNewStorage(self, node_name):
    vgname = self.cfg.GetVGName()
    iv_names = {}

    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))

      self.cfg.SetDiskID(dev, node_name)

      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
      names = _GenerateUniqueNames(self.lu, lv_names)

      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
                             logical_id=(vgname, names[0]))
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))

      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)

      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
                        _GetInstanceInfoText(self.instance), False)

    return iv_names

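  # A minimal sketch of the mapping returned above (comment only,
  # hypothetical names): each replaced disk keeps a reference to its old
  # and new backing LVs, e.g.:
  #   iv_names = {
  #     "disk/0": (<drbd Disk>, [<old data LV>, <old meta LV>],
  #                [<new data LV>, <new meta LV>]),
  #   }
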
  def _CheckDevices(self, node_name, iv_names):
    for name, (dev, _, _) in iv_names.iteritems():
      self.cfg.SetDiskID(dev, node_name)

      result = self.rpc.call_blockdev_find(node_name, dev)

      msg = result.fail_msg
      if msg or not result.payload:
        if not msg:
          msg = "disk not found"
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
                                 (name, msg))

      if result.payload.is_degraded:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

  def _RemoveOldStorage(self, node_name, iv_names):
    for name, (_, old_lvs, _) in iv_names.iteritems():
      self.lu.LogInfo("Remove logical volumes for %s" % name)

      for lv in old_lvs:
        self.cfg.SetDiskID(lv, node_name)

        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
        if msg:
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
                             hint="remove unused LVs manually")

  def _ReleaseNodeLock(self, node_name):
    """Releases the lock for a given node (or list of node names)."""
    self.lu.context.glm.release(locking.LEVEL_NODE, node_name)

  def _ExecDrbd8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for DRBD 8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaced.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.other_node, self.target_node])
    self._CheckVolumeGroup([self.target_node, self.other_node])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(self.other_node,
                                self.other_node == self.instance.primary_node,
                                False)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    iv_names = self._CreateNewStorage(self.target_node)

    # Step: for each lv, detach+rename*2+attach
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)

      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
                                                     old_lvs)
      result.Raise("Can't detach drbd from local storage on node"
                   " %s for device %s" % (self.target_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)

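      # Illustrative only (hypothetical names): for an LV whose physical_id
      # is ("xenvg", "uuid.disk0_data") and temp_suffix 1300000000, ren_fn
      # yields ("xenvg", "uuid.disk0_data_replaced-1300000000").
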
      # Build the rename list based on what LVs exist on the node
      rename_old_to_new = []
      for to_ren in old_lvs:
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
        if not result.fail_msg and result.payload:
          # device exists
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))

      self.lu.LogInfo("Renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_old_to_new)
      result.Raise("Can't rename old LVs on node %s" % self.target_node)

      # Now we rename the new LVs to the old LVs
      self.lu.LogInfo("Renaming the new LVs on the target node")
      rename_new_to_old = [(new, old.physical_id)
                           for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_new_to_old)
      result.Raise("Can't rename new LVs on node %s" % self.target_node)

      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        self.cfg.SetDiskID(new, self.target_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        self.cfg.SetDiskID(disk, self.target_node)

      # Now that the new lvs have the old name, we can add them to the device
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
                                                  new_lvs)
      msg = result.fail_msg
      if msg:
        for new_lv in new_lvs:
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
                                               new_lv).fail_msg
          if msg2:
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
                               hint=("cleanup manually the unused logical"
                                     " volumes"))
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)

      dev.children = new_lvs

      self.cfg.Update(self.instance, feedback_fn)

    cstep = 5
    if self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)
      # WARNING: we release both node locks here, do not do other RPCs
      # than WaitForSync to the primary node
      self._ReleaseNodeLock([self.target_node, self.other_node])

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep, steps_total, "Sync devices")
    cstep += 1
    _WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)

  def _ExecDrbd8Secondary(self, feedback_fn):
    """Replace the secondary node for DRBD 8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.instance.primary_node])
    self._CheckVolumeGroup([self.instance.primary_node])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(self.instance.primary_node, True, True)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
                      (self.new_node, idx))
      # we pass force_create=True to force LVM creation
      for new_lv in dev.children:
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
                        _GetInstanceInfoText(self.instance), False)

    # Step 4: drbd minors and drbd setups changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    minors = self.cfg.AllocateDRBDMinor([self.new_node
                                         for dev in self.instance.disks],
                                        self.instance.name)
    logging.debug("Allocated minors %r", minors)

    iv_names = {}
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
      self.lu.LogInfo("Activating a new drbd on %s for disk/%d" %
                      (self.new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the later activation in step 4
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if self.instance.primary_node == o_node1:
        p_minor = o_minor1
      else:
        assert self.instance.primary_node == o_node2, "Three-node instance?"
        p_minor = o_minor2

      new_alone_id = (self.instance.primary_node, self.new_node, None,
                      p_minor, new_minor, o_secret)
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
                    p_minor, new_minor, o_secret)

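      # Illustrative only (hypothetical values): a DRBD8 logical_id has the
      # shape (node_a, node_b, port, minor_a, minor_b, secret), so
      # new_alone_id might look like
      #   ("node1", "node3", None, 0, 5, "c0ffee")
      # where port=None keeps the new device standalone until the network
      # attach performed further down.
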
      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children,
                              size=dev.size)
      try:
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
                              _GetInstanceInfoText(self.instance), False)
      except errors.GenericError:
        self.cfg.ReleaseDRBDMinors(self.instance.name)
        raise

    # We have new devices, shutdown the drbd on the old secondary
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
      self.cfg.SetDiskID(dev, self.target_node)
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
      if msg:
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
                           " node: %s" % (idx, msg),
                           hint=("Please cleanup this device manually as"
                                 " soon as possible"))

    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
                                               self.node_secondary_ip,
                                               self.instance.disks)\
                                              [self.instance.primary_node]

    msg = result.fail_msg
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(self.instance.name)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    self.lu.LogInfo("Updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      self.cfg.SetDiskID(dev, self.instance.primary_node)

    self.cfg.Update(self.instance, feedback_fn)

    # and now perform the drbd attach
    self.lu.LogInfo("Attaching primary drbds to new secondary"
                    " (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                            self.new_node],
                                           self.node_secondary_ip,
                                           self.instance.disks,
                                           self.instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
                           to_node, msg,
                           hint=("please do a gnt-instance info to see the"
                                 " status of disks"))
    cstep = 5
    if self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)
      # WARNING: we release all node locks here, do not do other RPCs
      # than WaitForSync to the primary node
      self._ReleaseNodeLock([self.instance.primary_node,
                             self.target_node,
                             self.new_node])

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep, steps_total, "Sync devices")
    cstep += 1
    _WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      self._RemoveOldStorage(self.target_node, iv_names)


class LURepairNodeStorage(NoHooksLU):
  """Repairs the volume group on a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type

    if (constants.SO_FIX_CONSISTENCY not in
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
      raise errors.OpPrereqError("Storage units of type '%s' cannot be"
                                 " repaired" % storage_type,
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

  def _CheckFaultyDisks(self, instance, node_name):
    """Ensure faulty disks abort the opcode or at least warn."""
    try:
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
                                  node_name, True):
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
                                   " node '%s'" % (instance.name, node_name),
                                   errors.ECODE_STATE)
    except errors.OpPrereqError, err:
      if self.op.ignore_consistency:
        self.proc.LogWarning(str(err.args[0]))
      else:
        raise

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # Check whether any instance on this node has faulty disks
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
      if not inst.admin_up:
        continue
      check_nodes = set(inst.all_nodes)
      check_nodes.discard(self.op.node_name)
      for inst_node_name in check_nodes:
        self._CheckFaultyDisks(inst, inst_node_name)

  def Exec(self, feedback_fn):
    feedback_fn("Repairing storage unit '%s' on %s ..." %
                (self.op.name, self.op.node_name))

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_execute(self.op.node_name,
                                           self.op.storage_type, st_args,
                                           self.op.name,
                                           constants.SO_FIX_CONSISTENCY)
    result.Raise("Failed to repair storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))


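# For orientation (an assumption about the usual entry point, simplified):
# the LU above is normally reached via the command line, e.g.
#   gnt-node repair-storage node1.example.com lvm-vg xenvg
# which submits the corresponding opcode to the master daemon.
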
class LUNodeEvacStrategy(NoHooksLU):
  """Computes the node evacuation strategy.

  """
  REQ_BGL = False

  def CheckArguments(self):
    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")

  def ExpandNames(self):
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
    self.needed_locks = locks = {}
    if self.op.remote_node is None:
      locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
      locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]

  def Exec(self, feedback_fn):
    if self.op.remote_node is not None:
      instances = []
      for node in self.op.nodes:
        instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
      result = []
      for i in instances:
        if i.primary_node == self.op.remote_node:
          raise errors.OpPrereqError("Node %s is the primary node of"
                                     " instance %s, cannot use it as"
                                     " secondary" %
                                     (self.op.remote_node, i.name),
                                     errors.ECODE_INVAL)
        result.append([i.name, self.op.remote_node])
    else:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=constants.IALLOCATOR_MODE_MEVAC,
                       evac_nodes=self.op.nodes)
      ial.Run(self.op.iallocator, validate=True)
      if not ial.success:
        raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
                                 errors.ECODE_NORES)
      result = ial.result
    return result


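# Illustrative only (hypothetical names): with an explicit remote_node the
# strategy computed above is a list of [instance, new_secondary] pairs, e.g.
#   [["inst1.example.com", "node4.example.com"],
#    ["inst2.example.com", "node4.example.com"]]
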
class LUInstanceGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

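  # Illustrative only (an assumption about the hooks machinery): for
  # "gnt-instance grow-disk inst1 0 1024" the hook scripts would see the
  # keys above as GANETI_-prefixed environment variables, e.g.
  # GANETI_DISK=0 and GANETI_AMOUNT=1024.
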
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)

    self.instance = instance

    if instance.disk_template not in constants.DTS_GROWABLE:
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.", errors.ECODE_INVAL)

    self.disk = instance.FindDisk(self.op.disk)

    if instance.disk_template not in (constants.DT_FILE,
                                      constants.DT_SHARED_FILE):
      # TODO: check the free disk space for file, when that feature will be
      # supported
      _CheckNodesFreeDiskPerVG(self, nodenames,
                               self.disk.ComputeGrowth(self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = self.disk

    disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block device to grow")

    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      result.Raise("Grow request failed to node %s" % node)

      # TODO: Rewrite code to work properly
      # DRBD goes into sync mode for a short amount of time after executing the
      # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
      # calling "resize" in sync mode fails. Sleeping for a short amount of
      # time is a work-around.
      time.sleep(5)

    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance, feedback_fn)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance, disks=[disk])
      if disk_abort:
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")
      if not instance.admin_up:
        _SafeShutdownInstanceDisks(self, instance, disks=[disk])
    elif not instance.admin_up:
      self.proc.LogWarning("Not shutting down the disk even if the instance is"
                           " not supposed to be running because no wait for"
                           " sync mode was requested.")


class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = _ExpandInstanceName(self.cfg, name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]

  def _ComputeBlockdevStatus(self, node, instance_name, dev):
    """Returns the status of a block device.

    """
    if self.op.static or not node:
      return None

    self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_find(node, dev)
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance_name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

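  # Illustrative only (hypothetical values): the tuple built above has the
  # shape (dev_path, major, minor, sync_percent, estimated_time,
  # is_degraded, ldisk_status), e.g.
  #   ("/dev/drbd0", 147, 0, 99.5, 30, False, constants.LDS_OKAY)
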
  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    """
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance.name, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise("Error checking node %s" % instance.primary_node)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": _NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

      result[instance.name] = idict

    return result


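# Illustrative only (heavily trimmed, hypothetical values): Exec returns a
# dict keyed by instance name, e.g.
#   {"inst1.example.com": {"config_state": "up", "run_state": "up",
#                          "pnode": "node1.example.com", ...}}
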
class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      _CheckGlobalHvParams(self.op.hvparams)

    # Disk validation
    disk_addremove = 0
    for disk_op, disk_dict in self.op.disks:
      utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
      if disk_op == constants.DDM_REMOVE:
        disk_addremove += 1
        continue
      elif disk_op == constants.DDM_ADD:
        disk_addremove += 1
      else:
        if not isinstance(disk_op, int):
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
        if not isinstance(disk_dict, dict):
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

      if disk_op == constants.DDM_ADD:
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
        if mode not in constants.DISK_ACCESS_SET:
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                     errors.ECODE_INVAL)
        size = disk_dict.get('size', None)
        if size is None:
          raise errors.OpPrereqError("Required disk parameter size missing",
                                     errors.ECODE_INVAL)
        try:
          size = int(size)
        except (TypeError, ValueError), err:
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
                                     str(err), errors.ECODE_INVAL)
        disk_dict['size'] = size
      else:
        # modification of disk
        if 'size' in disk_dict:
          raise errors.OpPrereqError("Disk size change not possible, use"
                                     " grow-disk", errors.ECODE_INVAL)

    if disk_addremove > 1:
      raise errors.OpPrereqError("Only one disk add or remove operation"
                                 " supported at a time", errors.ECODE_INVAL)

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_NET_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # NIC validation
    nic_addremove = 0
    for nic_op, nic_dict in self.op.nics:
      utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
      if nic_op == constants.DDM_REMOVE:
        nic_addremove += 1
        continue
      elif nic_op == constants.DDM_ADD:
        nic_addremove += 1
      else:
        if not isinstance(nic_op, int):
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
        if not isinstance(nic_dict, dict):
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

      # nic_dict should be a dict
      nic_ip = nic_dict.get('ip', None)
      if nic_ip is not None:
        if nic_ip.lower() == constants.VALUE_NONE:
          nic_dict['ip'] = None
        else:
          if not netutils.IPAddress.IsValid(nic_ip):
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
                                       errors.ECODE_INVAL)

      nic_bridge = nic_dict.get('bridge', None)
      nic_link = nic_dict.get('link', None)
      if nic_bridge and nic_link:
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
                                   " at the same time", errors.ECODE_INVAL)
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
        nic_dict['bridge'] = None
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
        nic_dict['link'] = None

      if nic_op == constants.DDM_ADD:
        nic_mac = nic_dict.get('mac', None)
        if nic_mac is None:
          nic_dict['mac'] = constants.VALUE_AUTO

      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          nic_mac = utils.NormalizeAndValidateMac(nic_mac)

        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing nic",
                                     errors.ECODE_INVAL)

    if nic_addremove > 1:
      raise errors.OpPrereqError("Only one NIC add or remove operation"
                                 " supported at a time", errors.ECODE_INVAL)

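  # Illustrative only (hypothetical values): self.op.disks and self.op.nics
  # are lists of (operation, parameter-dict) pairs as validated above, e.g.
  #   disks = [(constants.DDM_ADD, {"size": 1024, "mode": "rw"})]
  #   nics = [(0, {"mac": constants.VALUE_GENERATE})]
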
  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = dict()
    if constants.BE_MEMORY in self.be_new:
      args['memory'] = self.be_new[constants.BE_MEMORY]
    if constants.BE_VCPUS in self.be_new:
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.
    if self.op.nics:
      args['nics'] = []
      nic_override = dict(self.op.nics)
      for idx, nic in enumerate(self.instance.nics):
        if idx in nic_override:
          this_nic_override = nic_override[idx]
        else:
          this_nic_override = {}
        if 'ip' in this_nic_override:
          ip = this_nic_override['ip']
        else:
          ip = nic.ip
        if 'mac' in this_nic_override:
          mac = this_nic_override['mac']
        else:
          mac = nic.mac
        if idx in self.nic_pnew:
          nicparams = self.nic_pnew[idx]
        else:
          nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
        mode = nicparams[constants.NIC_MODE]
        link = nicparams[constants.NIC_LINK]
        args['nics'].append((ip, mac, mode, link))
      if constants.DDM_ADD in nic_override:
        ip = nic_override[constants.DDM_ADD].get('ip', None)
        mac = nic_override[constants.DDM_ADD]['mac']
        nicparams = self.nic_pnew[constants.DDM_ADD]
        mode = nicparams[constants.NIC_MODE]
        link = nicparams[constants.NIC_LINK]
        args['nics'].append((ip, mac, mode, link))
      elif constants.DDM_REMOVE in nic_override:
        del args['nics'][-1]

    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    # checking the new params on the primary/secondary nodes

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    cluster = self.cluster = self.cfg.GetClusterInfo()
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    pnode = instance.primary_node
    nodelist = list(instance.all_nodes)

    # OS change
    if self.op.os_name and not self.op.force:
      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
                      self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = instance.os

    if self.op.disk_template:
      if instance.disk_template == self.op.disk_template:
        raise errors.OpPrereqError("Instance already has disk template %s" %
                                   instance.disk_template, errors.ECODE_INVAL)

      if (instance.disk_template,
          self.op.disk_template) not in self._DISK_CONVERSIONS:
        raise errors.OpPrereqError("Unsupported disk template conversion from"
                                   " %s to %s" % (instance.disk_template,
                                                  self.op.disk_template),
                                   errors.ECODE_INVAL)
      _CheckInstanceDown(self, instance, "cannot change disk template")
      if self.op.disk_template in constants.DTS_NET_MIRROR:
        if self.op.remote_node == pnode:
          raise errors.OpPrereqError("Given new secondary node %s is the same"
                                     " as the primary node of the instance" %
                                     self.op.remote_node, errors.ECODE_STATE)
        _CheckNodeOnline(self, self.op.remote_node)
        _CheckNodeNotDrained(self, self.op.remote_node)
        # FIXME: here we assume that the old instance type is DT_PLAIN
        assert instance.disk_template == constants.DT_PLAIN
        disks = [{"size": d.size, "vg": d.logical_id[0]}
                 for d in instance.disks]
        required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
        _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)

    # hvparams processing
    if self.op.hvparams:
      hv_type = instance.hypervisor
      i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
                                   use_none=True)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = cluster.SimpleFillBE(i_bedict)
      self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}

    # osparams processing
    if self.op.osparams:
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    self.warn = []

    if constants.BE_MEMORY in self.op.beparams and not self.op.force:
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         instance.hypervisor)
      pninfo = nodeinfo[pnode]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (pnode, msg))
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
        self.warn.append("Node data from primary node %s doesn't contain"
                         " free memory information" % pnode)
      elif instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      else:
        if instance_info.payload:
          current_mem = int(instance_info.payload['memory'])
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
                    pninfo.payload['memory_free'])
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem,
                                     errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.items():
          if node not in instance.secondary_nodes:
            continue
          msg = nres.fail_msg
          if msg:
            self.warn.append("Can't get info from secondary node %s: %s" %
                             (node, msg))
          elif not isinstance(nres.payload.get('memory_free', None), int):
            self.warn.append("Secondary node %s didn't return free"
                             " memory information" % node)
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
            self.warn.append("Not enough memory to failover instance to"
                             " secondary node %s" % node)

    # NIC processing
    self.nic_pnew = {}
    self.nic_pinst = {}
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        if not instance.nics:
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
                                     errors.ECODE_INVAL)
        continue
      if nic_op != constants.DDM_ADD:
        # an existing nic
        if not instance.nics:
          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
                                     " no NICs" % nic_op,
                                     errors.ECODE_INVAL)
        if nic_op < 0 or nic_op >= len(instance.nics):
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
                                     " are 0 to %d" %
                                     (nic_op, len(instance.nics) - 1),
                                     errors.ECODE_INVAL)
        old_nic_params = instance.nics[nic_op].nicparams
        old_nic_ip = instance.nics[nic_op].ip
      else:
        old_nic_params = {}
        old_nic_ip = None

      update_params_dict = dict([(key, nic_dict[key])
                                 for key in constants.NICS_PARAMETERS
                                 if key in nic_dict])

      if 'bridge' in nic_dict:
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']

      new_nic_params = _GetUpdatedParams(old_nic_params,
                                         update_params_dict)
      utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
      new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
      self.nic_pinst[nic_op] = new_nic_params
      self.nic_pnew[nic_op] = new_filled_nic_params
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]

      if new_nic_mode == constants.NIC_MODE_BRIDGED:
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
        if msg:
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
          if self.op.force:
            self.warn.append(msg)
          else:
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
      if new_nic_mode == constants.NIC_MODE_ROUTED:
        if 'ip' in nic_dict:
          nic_ip = nic_dict['ip']
        else:
          nic_ip = old_nic_ip
        if nic_ip is None:
          raise errors.OpPrereqError('Cannot set the nic ip to None'
                                     ' on a routed nic', errors.ECODE_INVAL)
      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac is None:
          raise errors.OpPrereqError('Cannot set the nic mac to None',
                                     errors.ECODE_INVAL)
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          # otherwise generate the mac
          nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
        else:
          # or validate/reserve the current one
          try:
            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
          except errors.ReservationError:
            raise errors.OpPrereqError("MAC address %s already in use"
                                       " in cluster" % nic_mac,
                                       errors.ECODE_NOTUNIQUE)

    # DISK processing
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances",
                                 errors.ECODE_INVAL)
    for disk_op, _ in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        if len(instance.disks) == 1:
          raise errors.OpPrereqError("Cannot remove the last disk of"
                                     " an instance", errors.ECODE_INVAL)
        _CheckInstanceDown(self, instance, "cannot remove disks")

      if (disk_op == constants.DDM_ADD and
          len(instance.disks) >= constants.MAX_DISKS):
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
                                   " add more" % constants.MAX_DISKS,
                                   errors.ECODE_STATE)
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
        # an existing disk
        if disk_op < 0 or disk_op >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
                                     " are 0 to %d" %
                                     (disk_op, len(instance.disks) - 1),
                                     errors.ECODE_INVAL)

    return

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    instance = self.instance
    pnode = instance.primary_node
    snode = self.op.remote_node

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{"size": d.size, "mode": d.mode} for d in instance.disks]
    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
                                      instance.name, pnode, [snode],
                                      disk_info, None, None, 0, feedback_fn)
    info = _GetInstanceInfoText(instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in new_disks:
      # unfortunately this is... not too nice
      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
                            info, True)
      for child in disk.children:
        _CreateSingleBlockDev(self, snode, instance, child, info, True)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    for disk in new_disks:
      for node in [pnode, snode]:
        f_create = node == pnode
        _CreateSingleBlockDev(self, node, instance, disk, info, f_create)

    # at this point, the instance has been modified
    instance.disk_template = constants.DT_DRBD8
    instance.disks = new_disks
    self.cfg.Update(instance, feedback_fn)

    # disks are created, waiting for sync
    disk_abort = not _WaitForSync(self, instance)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    instance = self.instance
    assert len(instance.secondary_nodes) == 1
    pnode = instance.primary_node
    snode = instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = instance.disks
    new_disks = [d.children[0] for d in old_disks]

    # copy over size and mode
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode

    # update instance structure
    instance.disks = new_disks
    instance.disk_template = constants.DT_PLAIN
    self.cfg.Update(instance, feedback_fn)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode)
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name, snode, msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode)
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx, pnode, msg)

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    result = []
    instance = self.instance
    # disk changes
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        # remove the last disk
        device = instance.disks.pop()
        device_idx = len(instance.disks)
        for node, disk in device.ComputeNodeTree(instance.primary_node):
          self.cfg.SetDiskID(disk, node)
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
          if msg:
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
                            " continuing anyway", device_idx, node, msg)
        result.append(("disk/%d" % device_idx, "remove"))
      elif disk_op == constants.DDM_ADD:
        # add a new disk
        if instance.disk_template in (constants.DT_FILE,
                                        constants.DT_SHARED_FILE):
          file_driver, file_path = instance.disks[0].logical_id
          file_path = os.path.dirname(file_path)
        else:
          file_driver = file_path = None
        disk_idx_base = len(instance.disks)
        new_disk = _GenerateDiskTemplate(self,
                                         instance.disk_template,
                                         instance.name, instance.primary_node,
                                         instance.secondary_nodes,
                                         [disk_dict],
                                         file_path,
                                         file_driver,
                                         disk_idx_base, feedback_fn)[0]
        instance.disks.append(new_disk)
        info = _GetInstanceInfoText(instance)

        logging.info("Creating volume %s for instance %s",
                     new_disk.iv_name, instance.name)
        # Note: this needs to be kept in sync with _CreateDisks
        #HARDCODE
        for node in instance.all_nodes:
          f_create = node == instance.primary_node
          try:
            _CreateBlockDev(self, node, instance, new_disk,
                            f_create, info, f_create)
          except errors.OpExecError, err:
            self.LogWarning("Failed to create volume %s (%s) on"
                            " node %s: %s",
                            new_disk.iv_name, new_disk, node, err)
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
                       (new_disk.size, new_disk.mode)))
      else:
        # change a given disk
        instance.disks[disk_op].mode = disk_dict['mode']
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))

    if self.op.disk_template:
      r_shut = _ShutdownInstanceDisks(self, instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise
      result.append(("disk_template", self.op.disk_template))

    # NIC changes
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        # remove the last nic
        del instance.nics[-1]
        result.append(("nic.%d" % len(instance.nics), "remove"))
      elif nic_op == constants.DDM_ADD:
        # mac and bridge should be set, by now
        mac = nic_dict['mac']
        ip = nic_dict.get('ip', None)
        nicparams = self.nic_pinst[constants.DDM_ADD]
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
        instance.nics.append(new_nic)
        result.append(("nic.%d" % (len(instance.nics) - 1),
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
                       (new_nic.mac, new_nic.ip,
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
                       )))
      else:
        for key in 'mac', 'ip':
          if key in nic_dict:
            setattr(instance.nics[nic_op], key, nic_dict[key])
        if nic_op in self.nic_pinst:
          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
        for key, val in nic_dict.iteritems():
          result.append(("nic.%s/%d" % (key, nic_op), val))

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    self.cfg.Update(instance, feedback_fn)

    return result

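  # Dispatch table mapping (current template, requested template) pairs to
  # the conversion routines above; at class-definition time these are plain
  # functions rather than bound methods, which is why the caller in Exec()
  # passes "self" explicitly.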
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUBackupQuery(NoHooksLU):
  """Query the exports list.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
    rpcresult = self.rpc.call_export_list(self.nodes)
    result = {}
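    # A node that failed to answer is reported as False instead of a list,
    # so callers can tell failure apart from "no exports on this node".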
    for node in rpcresult:
      if rpcresult[node].fail_msg:
        result[node] = False
      else:
        result[node] = rpcresult[node].payload

    return result


class LUBackupPrepare(NoHooksLU):
  """Prepares an instance for an export and returns useful information.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    instance_name = self.op.instance_name

    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self._cds = _GetClusterDomainSecret()

  def Exec(self, feedback_fn):
    """Prepares an instance for an export.

    """
    instance = self.instance

    if self.op.mode == constants.EXPORT_MODE_REMOTE:
      salt = utils.GenerateSecret(8)

      feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
      result = self.rpc.call_x509_cert_create(instance.primary_node,
                                              constants.RIE_CERT_VALIDITY)
      result.Raise("Can't create X509 key and certificate on %s" % result.node)

      (name, cert_pem) = result.payload

      cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                             cert_pem)

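      # Everything below is derived from the cluster domain secret, so the
      # receiving cluster can verify that the handshake, the key name and
      # the CA were generated by this cluster.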
      return {
        "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
        "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
                          salt),
        "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
        }

    return None


class LUBackupExport(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.x509_key_name = self.op.x509_key_name
    self.dest_x509_ca_pem = self.op.destination_x509_ca

    if self.op.mode == constants.EXPORT_MODE_REMOTE:
      if not self.x509_key_name:
        raise errors.OpPrereqError("Missing X509 key name for encryption",
                                   errors.ECODE_INVAL)

      if not self.dest_x509_ca_pem:
        raise errors.OpPrereqError("Missing destination X509 CA",
                                   errors.ECODE_INVAL)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    # Lock all nodes for local exports
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
      # FIXME: lock only instance primary and destination node
      #
      # Sad but true, for now we have to lock all nodes, as we don't know
      # where the previous export might be, and in this LU we search for it
      # and remove it from its current node. In the future we could fix this
      # by:
      #  - making a tasklet to search (share-lock all), then create the
      #    new one, then one to remove, after
      #  - removing the removal operation altogether
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_MODE": self.op.mode,
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      # TODO: Generic function for boolean env variables
      "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
      }

    env.update(_BuildInstanceHookEnvByObject(self, self.instance))

    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]

    if self.op.mode == constants.EXPORT_MODE_LOCAL:
      nl.append(self.op.target_node)

    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name

    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    if (self.op.remove_instance and self.instance.admin_up and
        not self.op.shutdown):
      raise errors.OpPrereqError("Cannot remove instance without shutting it"
                                 " down first")

    if self.op.mode == constants.EXPORT_MODE_LOCAL:
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
      self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
      assert self.dst_node is not None

      _CheckNodeOnline(self, self.dst_node.name)
      _CheckNodeNotDrained(self, self.dst_node.name)

      self._cds = None
      self.dest_disk_info = None
      self.dest_x509_ca = None

    elif self.op.mode == constants.EXPORT_MODE_REMOTE:
      self.dst_node = None

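      # In remote mode "target_node" is not a node name but a list of
      # per-disk destination information, one entry per instance disk.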
      if len(self.op.target_node) != len(self.instance.disks):
        raise errors.OpPrereqError(("Received destination information for %s"
                                    " disks, but instance %s has %s disks") %
                                   (len(self.op.target_node), instance_name,
                                    len(self.instance.disks)),
                                   errors.ECODE_INVAL)

      cds = _GetClusterDomainSecret()

      # Check X509 key name
      try:
        (key_name, hmac_digest, hmac_salt) = self.x509_key_name
      except (TypeError, ValueError), err:
        raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)

      if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
        raise errors.OpPrereqError("HMAC for X509 key name is wrong",
                                   errors.ECODE_INVAL)

      # Load and verify CA
      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
                                   (msg, ), errors.ECODE_INVAL)

      self.dest_x509_ca = cert

      # Verify target information
      disk_info = []
      for idx, disk_data in enumerate(self.op.target_node):
        try:
          (host, port, magic) = \
            masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
        except errors.GenericError, err:
          raise errors.OpPrereqError("Target info for disk %s: %s" %
                                     (idx, err), errors.ECODE_INVAL)

        disk_info.append((host, port, magic))

      assert len(disk_info) == len(self.op.target_node)
      self.dest_disk_info = disk_info

    else:
      raise errors.ProgrammerError("Unhandled export mode %r" %
                                   self.op.mode)

    # instance disk type verification
    # TODO: Implement export support for file-based disks
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks", errors.ECODE_INVAL)

  def _CleanupExports(self, feedback_fn):
    """Removes exports of current instance from all other nodes.

    If an instance in a cluster with nodes A..D was exported to node C, its
    exports will be removed from the nodes A, B and D.

    """
    assert self.op.mode != constants.EXPORT_MODE_REMOTE

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(self.dst_node.name)

    # on one-node clusters nodelist will be empty after the removal; if we
    # proceeded, the backup would be removed because OpBackupQuery
    # substitutes an empty list with the full cluster node list.
    iname = self.instance.name
    if nodelist:
      feedback_fn("Removing old exports for instance %s" % iname)
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].fail_msg:
          continue
        if iname in exportlist[node].payload:
          msg = self.rpc.call_export_remove(node, iname).fail_msg
          if msg:
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s: %s", iname, node, msg)

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    assert self.op.mode in constants.EXPORT_MODES

    instance = self.instance
    src_node = instance.primary_node

    if self.op.shutdown:
      # shutdown the instance, but not the disks
      feedback_fn("Shutting down instance %s" % instance.name)
      result = self.rpc.call_instance_shutdown(src_node, instance,
                                               self.op.shutdown_timeout)
      # TODO: Maybe ignore failures if ignore_remove_failures is set
      result.Raise("Could not shutdown instance %s on"
                   " node %s" % (instance.name, src_node))

    # set the disks ID correctly since call_instance_start needs the
    # correct drbd minor to create the symlinks
    for disk in instance.disks:
      self.cfg.SetDiskID(disk, src_node)

    activate_disks = (not instance.admin_up)

    if activate_disks:
      # Activate the instance disks if we're exporting a stopped instance
      feedback_fn("Activating disks for %s" % instance.name)
      _StartInstanceDisks(self, instance, None)

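    # Note the nested try/finally blocks below: the inner one removes the
    # snapshots created by the helper, the outer one deactivates the disks
    # again if we activated them above.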
    try:
      helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
                                                     instance)

      helper.CreateSnapshots()
      try:
        if (self.op.shutdown and instance.admin_up and
            not self.op.remove_instance):
          assert not activate_disks
          feedback_fn("Starting instance %s" % instance.name)
          result = self.rpc.call_instance_start(src_node, instance, None, None)
          msg = result.fail_msg
          if msg:
            feedback_fn("Failed to start instance: %s" % msg)
            _ShutdownInstanceDisks(self, instance)
            raise errors.OpExecError("Could not start instance: %s" % msg)

        if self.op.mode == constants.EXPORT_MODE_LOCAL:
          (fin_resu, dresults) = helper.LocalExport(self.dst_node)
        elif self.op.mode == constants.EXPORT_MODE_REMOTE:
          connect_timeout = constants.RIE_CONNECT_TIMEOUT
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          (key_name, _, _) = self.x509_key_name

          dest_ca_pem = \
            OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                            self.dest_x509_ca)

          (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
                                                     key_name, dest_ca_pem,
                                                     timeouts)
      finally:
        helper.Cleanup()

      # Check for backwards compatibility
      assert len(dresults) == len(instance.disks)
      assert compat.all(isinstance(i, bool) for i in dresults), \
             "Not all results are boolean: %r" % dresults

    finally:
      if activate_disks:
        feedback_fn("Deactivating disks for %s" % instance.name)
        _ShutdownInstanceDisks(self, instance)

    if not (compat.all(dresults) and fin_resu):
      failures = []
      if not fin_resu:
        failures.append("export finalization")
      if not compat.all(dresults):
        fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
                               if not dsk)
        failures.append("disk export: disk(s) %s" % fdsk)

      raise errors.OpExecError("Export failed, errors in %s" %
                               utils.CommaJoin(failures))

    # At this point the export was successful; we can clean up and finish

    # Remove instance if requested
    if self.op.remove_instance:
      feedback_fn("Removing instance %s" % instance.name)
      _RemoveInstance(self, feedback_fn, instance,
                      self.op.ignore_remove_failures)

    if self.op.mode == constants.EXPORT_MODE_LOCAL:
      self._CleanupExports(feedback_fn)

    return fin_resu, dresults


class LUBackupRemove(NoHooksLU):
  """Remove exports related to the named instance.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = False
    if not instance_name:
      fqdn_warn = True
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      msg = exportlist[node].fail_msg
      if msg:
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
        continue
      if instance_name in exportlist[node].payload:
        found = True
        result = self.rpc.call_export_remove(node, instance_name)
        msg = result.fail_msg
        if msg:
          logging.error("Could not remove export for instance %s"
                        " on node %s: %s", instance_name, node, msg)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")


class LUGroupAdd(LogicalUnit):
  """Logical unit for creating node groups.

  """
  HPATH = "group-add"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # We need the new group's UUID here so that we can create and acquire the
    # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
    # that it should not check whether the UUID exists in the configuration.
    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
    self.needed_locks = {}
    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given group name is not already an existing node
    group.

    """
    try:
      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
                                 " node group (UUID: %s)" %
                                 (self.op.group_name, existing_uuid),
                                 errors.ECODE_EXISTS)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "GROUP_NAME": self.op.group_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def Exec(self, feedback_fn):
    """Add the node group to the cluster.

    """
    group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
                                  uuid=self.group_uuid,
                                  alloc_policy=self.op.alloc_policy,
                                  ndparams=self.op.ndparams)

    self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
    del self.remove_locks[locking.LEVEL_NODEGROUP]


class LUGroupAssignNodes(NoHooksLU):
  """Logical unit for assigning nodes to groups.

  """
  REQ_BGL = False

  def ExpandNames(self):
    # These raise errors.OpPrereqError on their own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)

    # We want to lock all the affected nodes and groups. We have readily
    # available the list of nodes, and the *destination* group. To gather the
    # list of "source" groups, we need to fetch node information.
    self.node_data = self.cfg.GetAllNodesInfo()
    affected_groups = set(self.node_data[node].group for node in self.op.nodes)
    affected_groups.add(self.group_uuid)

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: list(affected_groups),
      locking.LEVEL_NODE: self.op.nodes,
      }

  def CheckPrereq(self):
    """Check prerequisites.

    """
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
    instance_data = self.cfg.GetAllInstancesInfo()

    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    (new_splits, previous_splits) = \
      self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
                                             for node in self.op.nodes],
                                            self.node_data, instance_data)

    if new_splits:
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))

      if not self.op.force:
        raise errors.OpExecError("The following instances get split by this"
                                 " change and --force was not given: %s" %
                                 fmt_new_splits)
      else:
        self.LogWarning("This operation will split the following instances: %s",
                        fmt_new_splits)

        if previous_splits:
          self.LogWarning("In addition, these already-split instances continue"
                          " to be split across groups: %s",
                          utils.CommaJoin(utils.NiceSort(previous_splits)))

  def Exec(self, feedback_fn):
    """Assign nodes to a new group.

    """
    for node in self.op.nodes:
      self.node_data[node].group = self.group_uuid

    self.cfg.Update(self.group, feedback_fn) # Saves all modified nodes.

  @staticmethod
  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
    """Check for split instances after a node assignment.

    This method considers a series of node assignments as an atomic operation,
    and returns information about split instances after applying the set of
    changes.

    In particular, it returns information about newly split instances, and
    instances that were already split, and remain so after the change.

    Only instances whose disk template is listed in constants.DTS_NET_MIRROR are
    considered.

    @type changes: list of (node_name, new_group_uuid) pairs.
    @param changes: list of node assignments to consider.
    @param node_data: a dict with data for all nodes
    @param instance_data: a dict with all instances to consider
    @rtype: a two-tuple
    @return: a list of instances that were previously okay and end up split as
      a consequence of this change, and a list of instances that were
      previously split and this change does not fix.

    """
    changed_nodes = dict((node, group) for node, group in changes
                         if node_data[node].group != group)

    all_split_instances = set()
    previously_split_instances = set()

    def InstanceNodes(instance):
      return [instance.primary_node] + list(instance.secondary_nodes)

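    # An instance is "split" when the nodes it lives on span more than one
    # group; we compute this once for the current groups and once for the
    # groups as they would look after applying "changes".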
    for inst in instance_data.values():
      if inst.disk_template not in constants.DTS_NET_MIRROR:
        continue

      instance_nodes = InstanceNodes(inst)

      if len(set(node_data[node].group for node in instance_nodes)) > 1:
        previously_split_instances.add(inst.name)

      if len(set(changed_nodes.get(node, node_data[node].group)
                 for node in instance_nodes)) > 1:
        all_split_instances.add(inst.name)

    return (list(all_split_instances - previously_split_instances),
            list(previously_split_instances & all_split_instances))


class _GroupQuery(_QueryBase):
  FIELDS = query.GROUP_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}

    self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
    name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())

    if not self.names:
      self.wanted = [name_to_uuid[name]
                     for name in utils.NiceSort(name_to_uuid.keys())]
    else:
      # Accept names to be either names or UUIDs.
      missing = []
      self.wanted = []
      all_uuid = frozenset(self._all_groups.keys())

      for name in self.names:
        if name in all_uuid:
          self.wanted.append(name)
        elif name in name_to_uuid:
          self.wanted.append(name_to_uuid[name])
        else:
          missing.append(name)

      if missing:
        raise errors.OpPrereqError("Some groups do not exist: %s" % missing,
                                   errors.ECODE_NOENT)

  def DeclareLocks(self, lu, level):
    pass

  def _GetQueryData(self, lu):
    """Computes the list of node groups and their attributes.

    """
    do_nodes = query.GQ_NODE in self.requested_data
    do_instances = query.GQ_INST in self.requested_data

    group_to_nodes = None
    group_to_instances = None

    # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
    # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
    # latter GetAllInstancesInfo() is not enough, for we have to go through
    # instance->node. Hence, we will need to process nodes even if we only need
    # instance information.
    if do_nodes or do_instances:
      all_nodes = lu.cfg.GetAllNodesInfo()
      group_to_nodes = dict((uuid, []) for uuid in self.wanted)
      node_to_group = {}

      for node in all_nodes.values():
        if node.group in group_to_nodes:
          group_to_nodes[node.group].append(node.name)
          node_to_group[node.name] = node.group

      if do_instances:
        all_instances = lu.cfg.GetAllInstancesInfo()
        group_to_instances = dict((uuid, []) for uuid in self.wanted)

        for instance in all_instances.values():
          node = instance.primary_node
          if node in node_to_group:
            group_to_instances[node_to_group[node]].append(instance.name)

        if not do_nodes:
          # Do not pass on node information if it was not requested.
          group_to_nodes = None

    return query.GroupQueryData([self._all_groups[uuid]
                                 for uuid in self.wanted],
                                group_to_nodes, group_to_instances)


class LUGroupQuery(NoHooksLU):
  """Logical unit for querying node groups.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
                          self.op.output_fields, False)

  def ExpandNames(self):
    self.gq.ExpandNames(self)

  def Exec(self, feedback_fn):
    return self.gq.OldStyleQuery(self)


class LUGroupSetParams(LogicalUnit):
  """Modifies the parameters of a node group.

  """
  HPATH = "group-modify"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def CheckArguments(self):
    all_changes = [
      self.op.ndparams,
      self.op.alloc_policy,
      ]

    if all_changes.count(None) == len(all_changes):
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    """
    self.group = self.cfg.GetNodeGroup(self.group_uuid)

    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    if self.op.ndparams:
      new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = new_ndparams

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "GROUP_NAME": self.op.group_name,
      "NEW_ALLOC_POLICY": self.op.alloc_policy,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def Exec(self, feedback_fn):
    """Modifies the node group.

    """
    result = []

    if self.op.ndparams:
      self.group.ndparams = self.new_ndparams
      result.append(("ndparams", str(self.group.ndparams)))

    if self.op.alloc_policy:
      self.group.alloc_policy = self.op.alloc_policy

    self.cfg.Update(self.group, feedback_fn)
    return result


class LUGroupRemove(LogicalUnit):
  HPATH = "group-remove"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given group name exists as a node group, that it is
    empty (i.e., contains no nodes), and that it is not the last group of the
    cluster.

    """
    # Verify that the group is empty.
    group_nodes = [node.name
                   for node in self.cfg.GetAllNodesInfo().values()
                   if node.group == self.group_uuid]

    if group_nodes:
      raise errors.OpPrereqError("Group '%s' not empty, has the following"
                                 " nodes: %s" %
                                 (self.op.group_name,
                                  utils.CommaJoin(utils.NiceSort(group_nodes))),
                                 errors.ECODE_STATE)

    # Verify the cluster would not be left group-less.
    if len(self.cfg.GetNodeGroupList()) == 1:
      raise errors.OpPrereqError("Group '%s' is the only group,"
                                 " cannot be removed" %
                                 self.op.group_name,
                                 errors.ECODE_STATE)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "GROUP_NAME": self.op.group_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def Exec(self, feedback_fn):
    """Remove the node group.

    """
    try:
      self.cfg.RemoveNodeGroup(self.group_uuid)
    except errors.ConfigurationError:
      raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
                               (self.op.group_name, self.group_uuid))

    self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid


class LUGroupRename(LogicalUnit):
  HPATH = "group-rename"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    Ensures requested new name is not yet used.

    """
    try:
      new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
                                 " node group (UUID: %s)" %
                                 (self.op.new_name, new_name_uuid),
                                 errors.ECODE_EXISTS)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OLD_NAME": self.op.group_name,
      "NEW_NAME": self.op.new_name,
      }

    mn = self.cfg.GetMasterNode()
    all_nodes = self.cfg.GetAllNodesInfo()
    run_nodes = [mn]
    all_nodes.pop(mn, None)

    for node in all_nodes.values():
      if node.group == self.group_uuid:
        run_nodes.append(node.name)

    return env, run_nodes, run_nodes

  def Exec(self, feedback_fn):
    """Rename the node group.

    """
    group = self.cfg.GetNodeGroup(self.group_uuid)

    if group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    group.name = self.op.new_name
    self.cfg.Update(group, feedback_fn)

    return self.op.new_name


class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    self.needed_locks = {}
    if self.op.kind == constants.TAG_NODE:
      self.op.name = _ExpandNodeName(self.cfg, self.op.name)
      self.needed_locks[locking.LEVEL_NODE] = self.op.name
    elif self.op.kind == constants.TAG_INSTANCE:
      self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name

    # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
    # not possible to acquire the BGL based on opcode parameters)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif self.op.kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif self.op.kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind), errors.ECODE_INVAL)


class LUTagsGet(TagsLU):
  """Returns the tags of a given object.

  """
  REQ_BGL = False

  def ExpandNames(self):
    TagsLU.ExpandNames(self)

    # Share locks as this is only a read operation
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    return list(self.target.GetTags())


class LUTagsSearch(NoHooksLU):
  """Searches the tags for a given pattern.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the pattern passed for validity by compiling it.

    """
    try:
      self.re = re.compile(self.op.pattern)
    except re.error, err:
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
                                 (self.op.pattern, err), errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Returns the (path, tag) pairs matching the pattern.

    """
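    # Build the list of (path, taggable object) pairs to search over: the
    # cluster itself, every instance and every node.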
    cfg = self.cfg
    tgts = [("/cluster", cfg.GetClusterInfo())]
    ilist = cfg.GetAllInstancesInfo().values()
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
    nlist = cfg.GetAllNodesInfo().values()
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
    results = []
    for path, target in tgts:
      for tag in target.GetTags():
        if self.re.search(tag):
          results.append((path, tag))
    return results


class LUTagsSet(TagsLU):
  """Sets a tag on a given object.

  """
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the type and length of the tag name and value.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

  def Exec(self, feedback_fn):
    """Sets the tags.

    """
    try:
      for tag in self.op.tags:
        self.target.AddTag(tag)
    except errors.TagError, err:
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
    self.cfg.Update(self.target, feedback_fn)


class LUTagsDel(TagsLU):
  """Delete a list of tags from a given object.

  """
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()

    diff_tags = del_tags - cur_tags
    if diff_tags:
      diff_names = ("'%s'" % i for i in sorted(diff_tags))
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (utils.CommaJoin(diff_names), ),
                                 errors.ECODE_NOENT)

  def Exec(self, feedback_fn):
    """Remove the tags from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    self.cfg.Update(self.target, feedback_fn)


class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def _TestDelay(self):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      for node, node_result in result.items():
        node_result.Raise("Failure during rpc call to node %s" % node)

  def Exec(self, feedback_fn):
    """Execute the test delay opcode, with the wanted repetitions.

    """
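    # With repeat == 0 the delay is still executed once; otherwise it runs
    # "repeat" times, logging 0-based iteration numbers.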
    if self.op.repeat == 0:
      self._TestDelay()
    else:
      top_value = self.op.repeat - 1
      for i in range(self.op.repeat):
        self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
        self._TestDelay()


class LUTestJqueue(NoHooksLU):
  """Utility LU to test some aspects of the job queue.

  """
  REQ_BGL = False

  # Must be lower than default timeout for WaitForJobChange to see whether it
  # notices changed jobs
  _CLIENT_CONNECT_TIMEOUT = 20.0
  _CLIENT_CONFIRM_TIMEOUT = 60.0

  @classmethod
  def _NotifyUsingSocket(cls, cb, errcls):
    """Opens a Unix socket and waits for another program to connect.

    @type cb: callable
    @param cb: Callback to send socket name to client
    @type errcls: class
    @param errcls: Exception class to use for errors

    """
    # Using a temporary directory as there's no easy way to create temporary
    # sockets without writing a custom loop around tempfile.mktemp and
    # socket.bind
    tmpdir = tempfile.mkdtemp()
    try:
      tmpsock = utils.PathJoin(tmpdir, "sock")

      logging.debug("Creating temporary socket at %s", tmpsock)
      sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
      try:
        sock.bind(tmpsock)
        sock.listen(1)

        # Send details to client
        cb(tmpsock)

        # Wait for client to connect before continuing
        sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
        try:
          (conn, _) = sock.accept()
        except socket.error, err:
          raise errcls("Client didn't connect in time (%s)" % err)
      finally:
        sock.close()
    finally:
      # Remove as soon as client is connected
      shutil.rmtree(tmpdir)

    # Wait for client to close
    try:
      try:
        # pylint: disable-msg=E1101
        # Instance of '_socketobject' has no ... member
        conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
        conn.recv(1)
      except socket.error, err:
        raise errcls("Client failed to confirm notification (%s)" % err)
    finally:
      conn.close()

  def _SendNotification(self, test, arg, sockname):
    """Sends a notification to the client.

    @type test: string
    @param test: Test name
    @param arg: Test argument (depends on test)
    @type sockname: string
    @param sockname: Socket path

    """
    self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))

  def _Notify(self, prereq, test, arg):
    """Notifies the client of a test.

    @type prereq: bool
    @param prereq: Whether this is a prereq-phase test
    @type test: string
    @param test: Test name
    @param arg: Test argument (depends on test)

    """
    if prereq:
      errcls = errors.OpPrereqError
    else:
      errcls = errors.OpExecError

    return self._NotifyUsingSocket(compat.partial(self._SendNotification,
                                                  test, arg),
                                   errcls)

  def CheckArguments(self):
    self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
    self.expandnames_calls = 0

  def ExpandNames(self):
    checkargs_calls = getattr(self, "checkargs_calls", 0)
    if checkargs_calls < 1:
      raise errors.ProgrammerError("CheckArguments was not called")

    self.expandnames_calls += 1

    if self.op.notify_waitlock:
      self._Notify(True, constants.JQT_EXPANDNAMES, None)

    self.LogInfo("Expanding names")

    # Get lock on master node (just to get a lock, not for a particular reason)
    self.needed_locks = {
      locking.LEVEL_NODE: self.cfg.GetMasterNode(),
      }

  def Exec(self, feedback_fn):
    if self.expandnames_calls < 1:
      raise errors.ProgrammerError("ExpandNames was not called")

    if self.op.notify_exec:
      self._Notify(False, constants.JQT_EXEC, None)

    self.LogInfo("Executing")

    if self.op.log_messages:
      self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
      for idx, msg in enumerate(self.op.log_messages):
        self.LogInfo("Sending log message %s", idx + 1)
        feedback_fn(constants.JQT_MSGPREFIX + msg)
        # Report how many test messages have been sent
        self._Notify(False, constants.JQT_LOGMSG, idx + 1)

    if self.op.fail:
      raise errors.OpExecError("Opcode failure was requested")

    return True


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has four sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  # pylint: disable-msg=R0902
  # lots of instance attributes
  _ALLO_KEYS = [
    "name", "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  _RELO_KEYS = [
    "name", "relocate_from",
    ]
  _EVAC_KEYS = [
    "evac_nodes",
    ]

  def __init__(self, cfg, rpc, mode, **kwargs):
    self.cfg = cfg
    self.rpc = rpc
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    self.name = None
    self.evac_nodes = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.result = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
      fn = self._AddNewInstance
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
      fn = self._AddRelocateInstance
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      keyset = self._EVAC_KEYS
      fn = self._AddEvacuateNodes
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
10976
      if key not in keyset:
10977
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
10978
                                     " IAllocator" % key)
10979
      setattr(self, key, kwargs[key])
10980

    
10981
    for key in keyset:
10982
      if key not in kwargs:
10983
        raise errors.ProgrammerError("Missing input parameter '%s' to"
10984
                                     " IAllocator" % key)
10985
    self._BuildInputData(fn)
10986

    
10987
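  # Illustrative usage only (a sketch mirroring LUTestAllocator.Exec further
  # below; the allocator script name "hail" is an example, not a fixed
  # default):
  #
  #   ial = IAllocator(self.cfg, self.rpc,
  #                    mode=constants.IALLOCATOR_MODE_MEVAC,
  #                    evac_nodes=["node3.example.com"])
  #   ial.Run("hail")
  #   if not ial.success:
  #     raise errors.OpExecError("Allocation failed: %s" % ial.info)
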
  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    ninfo = cfg.GetAllNodesInfo()
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_list = [n.name for n in ninfo.values() if n.vm_capable]

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      hypervisor_name = cluster_info.enabled_hypervisors[0]

    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
                                        hypervisor_name)
    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors)

    data["nodegroups"] = self._ComputeNodeGroupData(cfg)

    config_ndata = self._ComputeBasicNodeData(ninfo)
    data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
                                                 i_list, config_ndata)
    assert len(data["nodes"]) == len(ninfo), \
        "Incomplete node data computed"

    data["instances"] = self._ComputeInstanceData(cluster_info, i_list)

    self.in_data = data

  @staticmethod
  def _ComputeNodeGroupData(cfg):
    """Compute node groups data.

    """
    ng = {}
    for guuid, gdata in cfg.GetAllNodeGroupsInfo().items():
      ng[guuid] = {
        "name": gdata.name,
        "alloc_policy": gdata.alloc_policy,
        }
    return ng

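  # The mapping built above is keyed by group UUID; an illustrative entry
  # (values invented) looks like:
  #
  #   {"42f2a6c3-...": {"name": "default", "alloc_policy": "preferred"}}
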
  @staticmethod
  def _ComputeBasicNodeData(node_cfg):
    """Compute global node data.

    @rtype: dict
    @return: a dict of name: node dict, containing the static
        (config-derived) node parameters

    """
    node_results = {}
    for ninfo in node_cfg.values():
      # fill in static (config-based) values
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        "group": ninfo.group,
        "master_capable": ninfo.master_capable,
        "vm_capable": ninfo.vm_capable,
        }

      node_results[ninfo.name] = pnr

    return node_results

  @staticmethod
  def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
                              node_results):
    """Compute global node data.

    @param node_results: the basic node structures as filled from the config

    """
    # make a copy of the current dict
    node_results = dict(node_results)
    for nname, nresult in node_data.items():
      assert nname in node_results, "Missing basic data for node %s" % nname
      ninfo = node_cfg[nname]

      if not (ninfo.offline or ninfo.drained):
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload

        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
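        # Worked example for the adjustment below (numbers invented): a
        # primary instance configured with BE_MEMORY=1024 MiB but currently
        # using 512 MiB yields i_mem_diff = 512, so another 512 MiB are
        # subtracted from the node's reported memory_free and the full
        # 1024 MiB appear committed; a stopped instance (absent from the
        # payload) reserves its whole configured size, while max(0, ...)
        # avoids crediting memory back when an instance uses more than its
        # configured maximum.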
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr_dyn.update(node_results[nname])
        node_results[nname] = pnr_dyn

    return node_results

  @staticmethod
  def _ComputeInstanceData(cluster_info, i_list):
    """Compute global instance data.

    """
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
        nic_dict = {"mac": nic.mac,
                    "ip": nic.ip,
                    "mode": filled_params[constants.NIC_MODE],
                    "link": filled_params[constants.NIC_LINK],
                   }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    return instance_data

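  # Illustrative shape of a single entry in the returned mapping (all
  # values invented):
  #
  #   "inst1.example.com": {
  #     "admin_up": True, "vcpus": 2, "memory": 1024, "os": "debian-image",
  #     "nodes": ["node1.example.com", "node2.example.com"],
  #     "nics": [{"mac": "aa:00:00:11:22:33", "ip": None, "mode": "bridged",
  #               "link": "xen-br0", "bridge": "xen-br0"}],
  #     "disks": [{"size": 10240, "mode": "rw"}],
  #     "disk_template": "drbd", "hypervisor": "xen-pvm",
  #     "disk_space_total": 10368, "tags": [],
  #   }
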
  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    return request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node", errors.ECODE_STATE)

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    return request

  def _AddEvacuateNodes(self):
    """Add evacuate nodes data to allocator structure.

    """
    request = {
      "evac_nodes": self.evac_nodes
      }
    return request

  def _BuildInputData(self, fn):
    """Build input data structures.

    """
    self._ComputeClusterData()

    request = fn()
    request["type"] = self.mode
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)

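  # After _BuildInputData the serialized input, self.in_text, is the JSON
  # form of a structure roughly like the following (a sketch with made-up
  # values, trimmed to the keys built above):
  #
  #   {"version": constants.IALLOCATOR_VERSION,
  #    "cluster_name": "cluster.example.com",
  #    "cluster_tags": [],
  #    "enabled_hypervisors": ["xen-pvm"],
  #    "nodegroups": {...}, "nodes": {...}, "instances": {...},
  #    "request": {"type": "allocate", "name": "inst1.example.com", ...}}
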
  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and, if successful, save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["result"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'result' key"
                               " is not a list")
    self.out_data = rdict
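
  # A well-formed reply, as enforced above, is a JSON object along these
  # lines (a sketch with invented values; the content of "result" depends
  # on the request type, e.g. chosen node names for an allocation):
  #
  #   {"success": true,
  #    "info": "allocation successful",
  #    "result": ["node2.example.com", "node3.example.com"]}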


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode
    of the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr, errors.ECODE_INVAL)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname, errors.ECODE_EXISTS)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'",
                                   errors.ECODE_INVAL)
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'",
                                   errors.ECODE_INVAL)
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
                                     " parameter", errors.ECODE_INVAL)
      if self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      fname = _ExpandInstanceName(self.cfg, self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      if not hasattr(self.op, "evac_nodes"):
        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
                                   " opcode input", errors.ECODE_INVAL)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode, errors.ECODE_INVAL)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name",
                                   errors.ECODE_INVAL)
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction, errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       evac_nodes=self.op.evac_nodes)
    else:
      raise errors.ProgrammerError("Unhandled mode %s in"
                                   " LUTestAllocator.Exec", self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result


#: Query type implementations
_QUERY_IMPL = {
  constants.QR_INSTANCE: _InstanceQuery,
  constants.QR_NODE: _NodeQuery,
  constants.QR_GROUP: _GroupQuery,
  }


def _GetQueryImplementation(name):
  """Returns the implementation for a query type.

  @param name: Query type, must be one of L{constants.QR_OP_QUERY}

  """
  try:
    return _QUERY_IMPL[name]
  except KeyError:
    raise errors.OpPrereqError("Unknown query resource '%s'" % name,
                               errors.ECODE_INVAL)
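
# Illustrative lookup (a sketch, not an existing call site in this section):
# querying node data resolves to the _NodeQuery class, while an unknown
# resource name raises OpPrereqError:
#
#   impl = _GetQueryImplementation(constants.QR_NODE)  # -> _NodeQuery
#   _GetQueryImplementation("does-not-exist")          # raises OpPrereqError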