root / lib / cmdlib.py @ 3b01286e
1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module implementing the master-side code."""
23

    
24
# pylint: disable-msg=W0201,C0302
25

    
26
# W0201 since most LU attributes are defined in CheckPrereq or similar
27
# functions
28

    
29
# C0302: since we have waaaay too many lines in this module
30

    
31
import os
32
import os.path
33
import time
34
import re
35
import platform
36
import logging
37
import copy
38
import OpenSSL
39
import socket
40
import tempfile
41
import shutil
42

    
43
from ganeti import ssh
44
from ganeti import utils
45
from ganeti import errors
46
from ganeti import hypervisor
47
from ganeti import locking
48
from ganeti import constants
49
from ganeti import objects
50
from ganeti import serializer
51
from ganeti import ssconf
52
from ganeti import uidpool
53
from ganeti import compat
54
from ganeti import masterd
55
from ganeti import netutils
56
from ganeti import ht
57

    
58
import ganeti.masterd.instance # pylint: disable-msg=W0611
59

    
60
# Common opcode attributes
61

    
62
#: output fields for a query operation
63
_POutputFields = ("output_fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString))
64

    
65

    
66
#: the shutdown timeout
67
_PShutdownTimeout = ("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT,
68
                     ht.TPositiveInt)
69

    
70
#: the force parameter
71
_PForce = ("force", False, ht.TBool)
72

    
73
#: a required instance name (for single-instance LUs)
74
_PInstanceName = ("instance_name", ht.NoDefault, ht.TNonEmptyString)
75

    
76
#: Whether to ignore offline nodes
77
_PIgnoreOfflineNodes = ("ignore_offline_nodes", False, ht.TBool)
78

    
79
#: a required node name (for single-node LUs)
80
_PNodeName = ("node_name", ht.NoDefault, ht.TNonEmptyString)
81

    
82
#: the migration type (live/non-live)
83
_PMigrationMode = ("mode", None,
84
                   ht.TOr(ht.TNone, ht.TElemOf(constants.HT_MIGRATION_MODES)))
85

    
86
#: the obsolete 'live' mode (boolean)
87
_PMigrationLive = ("live", None, ht.TMaybeBool)
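
# A minimal illustration of how these common definitions are meant to be used:
# each one is a (name, default, validation) triple that an LU can include
# verbatim in its _OP_PARAMS list.  The "ignore_failures" parameter below is
# purely hypothetical:
#
#   _OP_PARAMS = [
#     _PInstanceName,
#     _PForce,
#     ("ignore_failures", False, ht.TBool),
#     ]
#
# ht.NoDefault marks a parameter as required; a callable default is invoked to
# produce the value when the opcode is filled in (see LogicalUnit.__init__).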
88

    
89

    
90
# End types
91
class LogicalUnit(object):
92
  """Logical Unit base class.
93

94
  Subclasses must follow these rules:
95
    - implement ExpandNames
96
    - implement CheckPrereq (except when tasklets are used)
97
    - implement Exec (except when tasklets are used)
98
    - implement BuildHooksEnv
99
    - redefine HPATH and HTYPE
100
    - optionally redefine their run requirements:
101
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
102

103
  Note that all commands require root permissions.
104

105
  @ivar dry_run_result: the value (if any) that will be returned to the caller
106
      in dry-run mode (signalled by opcode dry_run parameter)
107
  @cvar _OP_PARAMS: a list of opcode attributes, their default values
108
      they should get if not already defined, and types they must match
109

110
  """
111
  HPATH = None
112
  HTYPE = None
113
  _OP_PARAMS = []
114
  REQ_BGL = True
115

    
116
  def __init__(self, processor, op, context, rpc):
117
    """Constructor for LogicalUnit.
118

119
    This needs to be overridden in derived classes in order to check op
120
    validity.
121

122
    """
123
    self.proc = processor
124
    self.op = op
125
    self.cfg = context.cfg
126
    self.context = context
127
    self.rpc = rpc
128
    # Dicts used to declare locking needs to mcpu
129
    self.needed_locks = None
130
    self.acquired_locks = {}
131
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
132
    self.add_locks = {}
133
    self.remove_locks = {}
134
    # Used to force good behavior when calling helper functions
135
    self.recalculate_locks = {}
136
    self.__ssh = None
137
    # logging
138
    self.Log = processor.Log # pylint: disable-msg=C0103
139
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
140
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
141
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
142
    # support for dry-run
143
    self.dry_run_result = None
144
    # support for generic debug attribute
145
    if (not hasattr(self.op, "debug_level") or
146
        not isinstance(self.op.debug_level, int)):
147
      self.op.debug_level = 0
148

    
149
    # Tasklets
150
    self.tasklets = None
151

    
152
    # The new kind-of-type-system
153
    op_id = self.op.OP_ID
154
    for attr_name, aval, test in self._OP_PARAMS:
155
      if not hasattr(op, attr_name):
156
        if aval == ht.NoDefault:
157
          raise errors.OpPrereqError("Required parameter '%s.%s' missing" %
158
                                     (op_id, attr_name), errors.ECODE_INVAL)
159
        else:
160
          if callable(aval):
161
            dval = aval()
162
          else:
163
            dval = aval
164
          setattr(self.op, attr_name, dval)
165
      attr_val = getattr(op, attr_name)
166
      if test == ht.NoType:
167
        # no tests here
168
        continue
169
      if not callable(test):
170
        raise errors.ProgrammerError("Validation for parameter '%s.%s' failed,"
171
                                     " given type is not a proper type (%s)" %
172
                                     (op_id, attr_name, test))
173
      if not test(attr_val):
174
        logging.error("OpCode %s, parameter %s, has invalid type %s/value %s",
175
                      self.op.OP_ID, attr_name, type(attr_val), attr_val)
176
        raise errors.OpPrereqError("Parameter '%s.%s' fails validation" %
177
                                   (op_id, attr_name), errors.ECODE_INVAL)
178

    
179
    self.CheckArguments()
180

    
181
  def __GetSSH(self):
182
    """Returns the SshRunner object
183

184
    """
185
    if not self.__ssh:
186
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
187
    return self.__ssh
188

    
189
  ssh = property(fget=__GetSSH)
190

    
191
  def CheckArguments(self):
192
    """Check syntactic validity for the opcode arguments.
193

194
    This method is for doing a simple syntactic check and ensuring the
195
    validity of opcode parameters, without any cluster-related
196
    checks. While the same can be accomplished in ExpandNames and/or
197
    CheckPrereq, doing these separate is better because:
198

199
      - ExpandNames is left as purely a lock-related function
200
      - CheckPrereq is run after we have acquired locks (and possibly
201
        waited for them)
202

203
    The function is allowed to change the self.op attribute so that
204
    later methods no longer need to worry about missing parameters.
205

206
    """
207
    pass
208

    
209
  def ExpandNames(self):
210
    """Expand names for this LU.
211

212
    This method is called before starting to execute the opcode, and it should
213
    update all the parameters of the opcode to their canonical form (e.g. a
214
    short node name must be fully expanded after this method has successfully
215
    completed). This way locking, hooks, logging, ecc. can work correctly.
216

217
    LUs which implement this method must also populate the self.needed_locks
218
    member, as a dict with lock levels as keys, and a list of needed lock names
219
    as values. Rules:
220

221
      - use an empty dict if you don't need any lock
222
      - if you don't need any lock at a particular level omit that level
223
      - don't put anything for the BGL level
224
      - if you want all locks at a level use locking.ALL_SET as a value
225

226
    If you need to share locks (rather than acquire them exclusively) at one
227
    level you can modify self.share_locks, setting a true value (usually 1) for
228
    that level. By default locks are not shared.
229

230
    This function can also define a list of tasklets, which then will be
231
    executed in order instead of the usual LU-level CheckPrereq and Exec
232
    functions, if those are not defined by the LU.
233

234
    Examples::
235

236
      # Acquire all nodes and one instance
237
      self.needed_locks = {
238
        locking.LEVEL_NODE: locking.ALL_SET,
239
        locking.LEVEL_INSTANCE: ['instance1.example.com'],
240
      }
241
      # Acquire just two nodes
242
      self.needed_locks = {
243
        locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
244
      }
245
      # Acquire no locks
246
      self.needed_locks = {} # No, you can't leave it to the default value None
247

248
    """
249
    # The implementation of this method is mandatory only if the new LU is
250
    # concurrent, so that old LUs don't need to be changed all at the same
251
    # time.
252
    if self.REQ_BGL:
253
      self.needed_locks = {} # Exclusive LUs don't need locks.
254
    else:
255
      raise NotImplementedError
256

    
257
  def DeclareLocks(self, level):
258
    """Declare LU locking needs for a level
259

260
    While most LUs can just declare their locking needs at ExpandNames time,
261
    sometimes there's the need to calculate some locks after having acquired
262
    the ones before. This function is called just before acquiring locks at a
263
    particular level, but after acquiring the ones at lower levels, and permits
264
    such calculations. It can be used to modify self.needed_locks, and by
265
    default it does nothing.
266

267
    This function is only called if you have something already set in
268
    self.needed_locks for the level.
269

270
    @param level: Locking level which is going to be locked
271
    @type level: member of ganeti.locking.LEVELS
272

273
    """
274

    
275
  def CheckPrereq(self):
276
    """Check prerequisites for this LU.
277

278
    This method should check that the prerequisites for the execution
279
    of this LU are fulfilled. It can do internode communication, but
280
    it should be idempotent - no cluster or system changes are
281
    allowed.
282

283
    The method should raise errors.OpPrereqError in case something is
284
    not fulfilled. Its return value is ignored.
285

286
    This method should also update all the parameters of the opcode to
287
    their canonical form if it hasn't been done by ExpandNames before.
288

289
    """
290
    if self.tasklets is not None:
291
      for (idx, tl) in enumerate(self.tasklets):
292
        logging.debug("Checking prerequisites for tasklet %s/%s",
293
                      idx + 1, len(self.tasklets))
294
        tl.CheckPrereq()
295
    else:
296
      pass
297

    
298
  def Exec(self, feedback_fn):
299
    """Execute the LU.
300

301
    This method should implement the actual work. It should raise
302
    errors.OpExecError for failures that are somewhat dealt with in
303
    code, or expected.
304

305
    """
306
    if self.tasklets is not None:
307
      for (idx, tl) in enumerate(self.tasklets):
308
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
309
        tl.Exec(feedback_fn)
310
    else:
311
      raise NotImplementedError
312

    
313
  def BuildHooksEnv(self):
314
    """Build hooks environment for this LU.
315

316
    This method should return a three-element tuple consisting of: a dict
317
    containing the environment that will be used for running the
318
    specific hook for this LU, a list of node names on which the hook
319
    should run before the execution, and a list of node names on which
320
    the hook should run after the execution.
321

322
    The keys of the dict must not be prefixed with 'GANETI_' as this will
323
    be handled in the hooks runner. Also note additional keys will be
324
    added by the hooks runner. If the LU doesn't define any
325
    environment, an empty dict (and not None) should be returned.
326

327
    If there are no nodes, an empty list (and not None) should be returned.
328

329
    Note that if the HPATH for a LU class is None, this function will
330
    not be called.
331

332
    """
333
    raise NotImplementedError
334

    
335
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
336
    """Notify the LU about the results of its hooks.
337

338
    This method is called every time a hooks phase is executed, and notifies
339
    the Logical Unit about the hooks' result. The LU can then use it to alter
340
    its result based on the hooks.  By default the method does nothing and the
341
    previous result is passed back unchanged but any LU can define it if it
342
    wants to use the local cluster hook-scripts somehow.
343

344
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
345
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
346
    @param hook_results: the results of the multi-node hooks rpc call
347
    @param feedback_fn: function used to send feedback back to the caller
348
    @param lu_result: the previous Exec result this LU had, or None
349
        in the PRE phase
350
    @return: the new Exec result, based on the previous result
351
        and hook results
352

353
    """
354
    # API must be kept, thus we ignore the unused argument and could
355
    # be a function warnings
356
    # pylint: disable-msg=W0613,R0201
357
    return lu_result
358

    
359
  def _ExpandAndLockInstance(self):
360
    """Helper function to expand and lock an instance.
361

362
    Many LUs that work on an instance take its name in self.op.instance_name
363
    and need to expand it and then declare the expanded name for locking. This
364
    function does it, and then updates self.op.instance_name to the expanded
365
    name. It also initializes needed_locks as a dict, if this hasn't been done
366
    before.
367

368
    """
369
    if self.needed_locks is None:
370
      self.needed_locks = {}
371
    else:
372
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
373
        "_ExpandAndLockInstance called with instance-level locks set"
374
    self.op.instance_name = _ExpandInstanceName(self.cfg,
375
                                                self.op.instance_name)
376
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
377

    
378
  def _LockInstancesNodes(self, primary_only=False):
379
    """Helper function to declare instances' nodes for locking.
380

381
    This function should be called after locking one or more instances to lock
382
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
383
    with all primary or secondary nodes for instances already locked and
384
    present in self.needed_locks[locking.LEVEL_INSTANCE].
385

386
    It should be called from DeclareLocks, and for safety only works if
387
    self.recalculate_locks[locking.LEVEL_NODE] is set.
388

389
    In the future it may grow parameters to just lock some instance's nodes, or
390
    to just lock primaries or secondary nodes, if needed.
391

392
    It should be called from DeclareLocks in a way similar to::
393

394
      if level == locking.LEVEL_NODE:
395
        self._LockInstancesNodes()
396

397
    @type primary_only: boolean
398
    @param primary_only: only lock primary nodes of locked instances
399

400
    """
401
    assert locking.LEVEL_NODE in self.recalculate_locks, \
402
      "_LockInstancesNodes helper function called with no nodes to recalculate"
403

    
404
    # TODO: check if we've really been called with the instance locks held
405

    
406
    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
407
    # future we might want to have different behaviors depending on the value
408
    # of self.recalculate_locks[locking.LEVEL_NODE]
409
    wanted_nodes = []
410
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
411
      instance = self.context.cfg.GetInstanceInfo(instance_name)
412
      wanted_nodes.append(instance.primary_node)
413
      if not primary_only:
414
        wanted_nodes.extend(instance.secondary_nodes)
415

    
416
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
417
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
418
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
419
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
420

    
421
    del self.recalculate_locks[locking.LEVEL_NODE]
422

    
423

    
424
class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
425
  """Simple LU which runs no hooks.
426

427
  This LU is intended as a parent for other LogicalUnits which will
428
  run no hooks, in order to reduce duplicate code.
429

430
  """
431
  HPATH = None
432
  HTYPE = None
433

    
434
  def BuildHooksEnv(self):
435
    """Empty BuildHooksEnv for NoHooksLu.
436

437
    This just raises an error.
438

439
    """
440
    assert False, "BuildHooksEnv called for NoHooksLUs"
441

    
442

    
443
class Tasklet:
444
  """Tasklet base class.
445

446
  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
447
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
448
  tasklets know nothing about locks.
449

450
  Subclasses must follow these rules:
451
    - Implement CheckPrereq
452
    - Implement Exec
453

454
  """
455
  def __init__(self, lu):
456
    self.lu = lu
457

    
458
    # Shortcuts
459
    self.cfg = lu.cfg
460
    self.rpc = lu.rpc
461

    
462
  def CheckPrereq(self):
463
    """Check prerequisites for this tasklets.
464

465
    This method should check whether the prerequisites for the execution of
466
    this tasklet are fulfilled. It can do internode communication, but it
467
    should be idempotent - no cluster or system changes are allowed.
468

469
    The method should raise errors.OpPrereqError in case something is not
470
    fulfilled. Its return value is ignored.
471

472
    This method should also update all parameters to their canonical form if it
473
    hasn't been done before.
474

475
    """
476
    pass
477

    
478
  def Exec(self, feedback_fn):
479
    """Execute the tasklet.
480

481
    This method should implement the actual work. It should raise
482
    errors.OpExecError for failures that are somewhat dealt with in code, or
483
    expected.
484

485
    """
486
    raise NotImplementedError
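
# A sketch of how tasklets are typically wired up (the tasklet class used here
# is hypothetical): the LU builds the list in ExpandNames and relies on the
# LogicalUnit base class to iterate over it in CheckPrereq and Exec:
#
#   def ExpandNames(self):
#     ...
#     self.tasklets = [_SomeTasklet(self)]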
487

    
488

    
489
def _GetWantedNodes(lu, nodes):
490
  """Returns list of checked and expanded node names.
491

492
  @type lu: L{LogicalUnit}
493
  @param lu: the logical unit on whose behalf we execute
494
  @type nodes: list
495
  @param nodes: non-empty list of node names
496
  @rtype: list
497
  @return: the list of nodes, sorted
498
  @raise errors.ProgrammerError: if the nodes parameter is empty
499

500
  """
501
  if not nodes:
502
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
503
      " non-empty list of nodes whose name is to be expanded.")
504

    
505
  wanted = [_ExpandNodeName(lu.cfg, name) for name in nodes]
506
  return utils.NiceSort(wanted)
507

    
508

    
509
def _GetWantedInstances(lu, instances):
510
  """Returns list of checked and expanded instance names.
511

512
  @type lu: L{LogicalUnit}
513
  @param lu: the logical unit on whose behalf we execute
514
  @type instances: list
515
  @param instances: list of instance names or None for all instances
516
  @rtype: list
517
  @return: the list of instances, sorted
518
  @raise errors.OpPrereqError: if the instances parameter is wrong type
519
  @raise errors.OpPrereqError: if any of the passed instances is not found
520

521
  """
522
  if instances:
523
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
524
  else:
525
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
526
  return wanted
527

    
528

    
529
def _GetUpdatedParams(old_params, update_dict,
530
                      use_default=True, use_none=False):
531
  """Return the new version of a parameter dictionary.
532

533
  @type old_params: dict
534
  @param old_params: old parameters
535
  @type update_dict: dict
536
  @param update_dict: dict containing new parameter values, or
537
      constants.VALUE_DEFAULT to reset the parameter to its default
538
      value
539
  @param use_default: boolean
540
  @type use_default: whether to recognise L{constants.VALUE_DEFAULT}
541
      values as 'to be deleted' values
542
  @param use_none: boolean
543
  @type use_none: whether to recognise C{None} values as 'to be
544
      deleted' values
545
  @rtype: dict
546
  @return: the new parameter dictionary
547

548
  """
549
  params_copy = copy.deepcopy(old_params)
550
  for key, val in update_dict.iteritems():
551
    if ((use_default and val == constants.VALUE_DEFAULT) or
552
        (use_none and val is None)):
553
      try:
554
        del params_copy[key]
555
      except KeyError:
556
        pass
557
    else:
558
      params_copy[key] = val
559
  return params_copy
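
# A small worked example of the merge semantics above (values illustrative):
# with use_default=True,
#   _GetUpdatedParams({"acpi": True, "pae": False},
#                     {"pae": constants.VALUE_DEFAULT, "kernel_path": "/k"})
# drops "pae" (reset to its default) and returns
#   {"acpi": True, "kernel_path": "/k"}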
560

    
561

    
562
def _CheckOutputFields(static, dynamic, selected):
563
  """Checks whether all selected fields are valid.
564

565
  @type static: L{utils.FieldSet}
566
  @param static: static fields set
567
  @type dynamic: L{utils.FieldSet}
568
  @param dynamic: dynamic fields set
569

570
  """
571
  f = utils.FieldSet()
572
  f.Extend(static)
573
  f.Extend(dynamic)
574

    
575
  delta = f.NonMatching(selected)
576
  if delta:
577
    raise errors.OpPrereqError("Unknown output fields selected: %s"
578
                               % ",".join(delta), errors.ECODE_INVAL)
579

    
580

    
581
def _CheckGlobalHvParams(params):
582
  """Validates that given hypervisor params are not global ones.
583

584
  This will ensure that instances don't get customised versions of
585
  global params.
586

587
  """
588
  used_globals = constants.HVC_GLOBALS.intersection(params)
589
  if used_globals:
590
    msg = ("The following hypervisor parameters are global and cannot"
591
           " be customized at instance level, please modify them at"
592
           " cluster level: %s" % utils.CommaJoin(used_globals))
593
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
594

    
595

    
596
def _CheckNodeOnline(lu, node):
597
  """Ensure that a given node is online.
598

599
  @param lu: the LU on behalf of which we make the check
600
  @param node: the node to check
601
  @raise errors.OpPrereqError: if the node is offline
602

603
  """
604
  if lu.cfg.GetNodeInfo(node).offline:
605
    raise errors.OpPrereqError("Can't use offline node %s" % node,
606
                               errors.ECODE_STATE)
607

    
608

    
609
def _CheckNodeNotDrained(lu, node):
610
  """Ensure that a given node is not drained.
611

612
  @param lu: the LU on behalf of which we make the check
613
  @param node: the node to check
614
  @raise errors.OpPrereqError: if the node is drained
615

616
  """
617
  if lu.cfg.GetNodeInfo(node).drained:
618
    raise errors.OpPrereqError("Can't use drained node %s" % node,
619
                               errors.ECODE_STATE)
620

    
621

    
622
def _CheckNodeVmCapable(lu, node):
623
  """Ensure that a given node is vm capable.
624

625
  @param lu: the LU on behalf of which we make the check
626
  @param node: the node to check
627
  @raise errors.OpPrereqError: if the node is not vm capable
628

629
  """
630
  if not lu.cfg.GetNodeInfo(node).vm_capable:
631
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
632
                               errors.ECODE_STATE)
633

    
634

    
635
def _CheckNodeHasOS(lu, node, os_name, force_variant):
636
  """Ensure that a node supports a given OS.
637

638
  @param lu: the LU on behalf of which we make the check
639
  @param node: the node to check
640
  @param os_name: the OS to query about
641
  @param force_variant: whether to ignore variant errors
642
  @raise errors.OpPrereqError: if the node is not supporting the OS
643

644
  """
645
  result = lu.rpc.call_os_get(node, os_name)
646
  result.Raise("OS '%s' not in supported OS list for node %s" %
647
               (os_name, node),
648
               prereq=True, ecode=errors.ECODE_INVAL)
649
  if not force_variant:
650
    _CheckOSVariant(result.payload, os_name)
651

    
652

    
653
def _RequireFileStorage():
654
  """Checks that file storage is enabled.
655

656
  @raise errors.OpPrereqError: when file storage is disabled
657

658
  """
659
  if not constants.ENABLE_FILE_STORAGE:
660
    raise errors.OpPrereqError("File storage disabled at configure time",
661
                               errors.ECODE_INVAL)
662

    
663

    
664
def _CheckDiskTemplate(template):
665
  """Ensure a given disk template is valid.
666

667
  """
668
  if template not in constants.DISK_TEMPLATES:
669
    msg = ("Invalid disk template name '%s', valid templates are: %s" %
670
           (template, utils.CommaJoin(constants.DISK_TEMPLATES)))
671
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
672
  if template == constants.DT_FILE:
673
    _RequireFileStorage()
674
  return True
675

    
676

    
677
def _CheckStorageType(storage_type):
678
  """Ensure a given storage type is valid.
679

680
  """
681
  if storage_type not in constants.VALID_STORAGE_TYPES:
682
    raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
683
                               errors.ECODE_INVAL)
684
  if storage_type == constants.ST_FILE:
685
    _RequireFileStorage()
686
  return True
687

    
688

    
689
def _GetClusterDomainSecret():
690
  """Reads the cluster domain secret.
691

692
  """
693
  return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
694
                               strict=True)
695

    
696

    
697
def _CheckInstanceDown(lu, instance, reason):
698
  """Ensure that an instance is not running."""
699
  if instance.admin_up:
700
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
701
                               (instance.name, reason), errors.ECODE_STATE)
702

    
703
  pnode = instance.primary_node
704
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
705
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
706
              prereq=True, ecode=errors.ECODE_ENVIRON)
707

    
708
  if instance.name in ins_l.payload:
709
    raise errors.OpPrereqError("Instance %s is running, %s" %
710
                               (instance.name, reason), errors.ECODE_STATE)
711

    
712

    
713
def _ExpandItemName(fn, name, kind):
714
  """Expand an item name.
715

716
  @param fn: the function to use for expansion
717
  @param name: requested item name
718
  @param kind: text description ('Node' or 'Instance')
719
  @return: the resolved (full) name
720
  @raise errors.OpPrereqError: if the item is not found
721

722
  """
723
  full_name = fn(name)
724
  if full_name is None:
725
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
726
                               errors.ECODE_NOENT)
727
  return full_name
728

    
729

    
730
def _ExpandNodeName(cfg, name):
731
  """Wrapper over L{_ExpandItemName} for nodes."""
732
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
733

    
734

    
735
def _ExpandInstanceName(cfg, name):
736
  """Wrapper over L{_ExpandItemName} for instance."""
737
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
738

    
739

    
740
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
741
                          memory, vcpus, nics, disk_template, disks,
742
                          bep, hvp, hypervisor_name):
743
  """Builds instance related env variables for hooks
744

745
  This builds the hook environment from individual variables.
746

747
  @type name: string
748
  @param name: the name of the instance
749
  @type primary_node: string
750
  @param primary_node: the name of the instance's primary node
751
  @type secondary_nodes: list
752
  @param secondary_nodes: list of secondary nodes as strings
753
  @type os_type: string
754
  @param os_type: the name of the instance's OS
755
  @type status: boolean
756
  @param status: the should_run status of the instance
757
  @type memory: string
758
  @param memory: the memory size of the instance
759
  @type vcpus: string
760
  @param vcpus: the count of VCPUs the instance has
761
  @type nics: list
762
  @param nics: list of tuples (ip, mac, mode, link) representing
763
      the NICs the instance has
764
  @type disk_template: string
765
  @param disk_template: the disk template of the instance
766
  @type disks: list
767
  @param disks: the list of (size, mode) pairs
768
  @type bep: dict
769
  @param bep: the backend parameters for the instance
770
  @type hvp: dict
771
  @param hvp: the hypervisor parameters for the instance
772
  @type hypervisor_name: string
773
  @param hypervisor_name: the hypervisor for the instance
774
  @rtype: dict
775
  @return: the hook environment for this instance
776

777
  """
778
  if status:
779
    str_status = "up"
780
  else:
781
    str_status = "down"
782
  env = {
783
    "OP_TARGET": name,
784
    "INSTANCE_NAME": name,
785
    "INSTANCE_PRIMARY": primary_node,
786
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
787
    "INSTANCE_OS_TYPE": os_type,
788
    "INSTANCE_STATUS": str_status,
789
    "INSTANCE_MEMORY": memory,
790
    "INSTANCE_VCPUS": vcpus,
791
    "INSTANCE_DISK_TEMPLATE": disk_template,
792
    "INSTANCE_HYPERVISOR": hypervisor_name,
793
  }
794

    
795
  if nics:
796
    nic_count = len(nics)
797
    for idx, (ip, mac, mode, link) in enumerate(nics):
798
      if ip is None:
799
        ip = ""
800
      env["INSTANCE_NIC%d_IP" % idx] = ip
801
      env["INSTANCE_NIC%d_MAC" % idx] = mac
802
      env["INSTANCE_NIC%d_MODE" % idx] = mode
803
      env["INSTANCE_NIC%d_LINK" % idx] = link
804
      if mode == constants.NIC_MODE_BRIDGED:
805
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
806
  else:
807
    nic_count = 0
808

    
809
  env["INSTANCE_NIC_COUNT"] = nic_count
810

    
811
  if disks:
812
    disk_count = len(disks)
813
    for idx, (size, mode) in enumerate(disks):
814
      env["INSTANCE_DISK%d_SIZE" % idx] = size
815
      env["INSTANCE_DISK%d_MODE" % idx] = mode
816
  else:
817
    disk_count = 0
818

    
819
  env["INSTANCE_DISK_COUNT"] = disk_count
820

    
821
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
822
    for key, value in source.items():
823
      env["INSTANCE_%s_%s" % (kind, key)] = value
824

    
825
  return env
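
# The resulting environment therefore contains, besides the fixed keys set
# above (OP_TARGET, INSTANCE_NAME, INSTANCE_PRIMARY, ...), one
# INSTANCE_NIC<n>_{IP,MAC,MODE,LINK} group per NIC (plus INSTANCE_NIC<n>_BRIDGE
# for bridged NICs), one INSTANCE_DISK<n>_{SIZE,MODE} group per disk, the
# INSTANCE_NIC_COUNT and INSTANCE_DISK_COUNT totals, and INSTANCE_BE_*/
# INSTANCE_HV_* entries for the backend and hypervisor parameters; the hooks
# runner later adds the "GANETI_" prefix to all of them.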
826

    
827

    
828
def _NICListToTuple(lu, nics):
829
  """Build a list of nic information tuples.
830

831
  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
832
  value in LUQueryInstanceData.
833

834
  @type lu:  L{LogicalUnit}
835
  @param lu: the logical unit on whose behalf we execute
836
  @type nics: list of L{objects.NIC}
837
  @param nics: list of nics to convert to hooks tuples
838

839
  """
840
  hooks_nics = []
841
  cluster = lu.cfg.GetClusterInfo()
842
  for nic in nics:
843
    ip = nic.ip
844
    mac = nic.mac
845
    filled_params = cluster.SimpleFillNIC(nic.nicparams)
846
    mode = filled_params[constants.NIC_MODE]
847
    link = filled_params[constants.NIC_LINK]
848
    hooks_nics.append((ip, mac, mode, link))
849
  return hooks_nics
850

    
851

    
852
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
853
  """Builds instance related env variables for hooks from an object.
854

855
  @type lu: L{LogicalUnit}
856
  @param lu: the logical unit on whose behalf we execute
857
  @type instance: L{objects.Instance}
858
  @param instance: the instance for which we should build the
859
      environment
860
  @type override: dict
861
  @param override: dictionary with key/values that will override
862
      our values
863
  @rtype: dict
864
  @return: the hook environment dictionary
865

866
  """
867
  cluster = lu.cfg.GetClusterInfo()
868
  bep = cluster.FillBE(instance)
869
  hvp = cluster.FillHV(instance)
870
  args = {
871
    'name': instance.name,
872
    'primary_node': instance.primary_node,
873
    'secondary_nodes': instance.secondary_nodes,
874
    'os_type': instance.os,
875
    'status': instance.admin_up,
876
    'memory': bep[constants.BE_MEMORY],
877
    'vcpus': bep[constants.BE_VCPUS],
878
    'nics': _NICListToTuple(lu, instance.nics),
879
    'disk_template': instance.disk_template,
880
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
881
    'bep': bep,
882
    'hvp': hvp,
883
    'hypervisor_name': instance.hypervisor,
884
  }
885
  if override:
886
    args.update(override)
887
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142
888

    
889

    
890
def _AdjustCandidatePool(lu, exceptions):
891
  """Adjust the candidate pool after node operations.
892

893
  """
894
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
895
  if mod_list:
896
    lu.LogInfo("Promoted nodes to master candidate role: %s",
897
               utils.CommaJoin(node.name for node in mod_list))
898
    for name in mod_list:
899
      lu.context.ReaddNode(name)
900
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
901
  if mc_now > mc_max:
902
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
903
               (mc_now, mc_max))
904

    
905

    
906
def _DecideSelfPromotion(lu, exceptions=None):
907
  """Decide whether I should promote myself as a master candidate.
908

909
  """
910
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
911
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
912
  # the new node will increase mc_max by one, so:
913
  mc_should = min(mc_should + 1, cp_size)
914
  return mc_now < mc_should
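
# Worked example (numbers illustrative): with candidate_pool_size=10, mc_now=3
# and mc_should=3 as reported by GetMasterCandidateStats, adding this node
# raises the target to min(3 + 1, 10) = 4, so 3 < 4 and the node promotes
# itself; once the pool already holds candidate_pool_size candidates the
# comparison becomes false.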
915

    
916

    
917
def _CheckNicsBridgesExist(lu, target_nics, target_node):
918
  """Check that the brigdes needed by a list of nics exist.
919

920
  """
921
  cluster = lu.cfg.GetClusterInfo()
922
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
923
  brlist = [params[constants.NIC_LINK] for params in paramslist
924
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
925
  if brlist:
926
    result = lu.rpc.call_bridges_exist(target_node, brlist)
927
    result.Raise("Error checking bridges on destination node '%s'" %
928
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
929

    
930

    
931
def _CheckInstanceBridgesExist(lu, instance, node=None):
932
  """Check that the brigdes needed by an instance exist.
933

934
  """
935
  if node is None:
936
    node = instance.primary_node
937
  _CheckNicsBridgesExist(lu, instance.nics, node)
938

    
939

    
940
def _CheckOSVariant(os_obj, name):
941
  """Check whether an OS name conforms to the os variants specification.
942

943
  @type os_obj: L{objects.OS}
944
  @param os_obj: OS object to check
945
  @type name: string
946
  @param name: OS name passed by the user, to check for validity
947

948
  """
949
  if not os_obj.supported_variants:
950
    return
951
  variant = objects.OS.GetVariant(name)
952
  if not variant:
953
    raise errors.OpPrereqError("OS name must include a variant",
954
                               errors.ECODE_INVAL)
955

    
956
  if variant not in os_obj.supported_variants:
957
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
958

    
959

    
960
def _GetNodeInstancesInner(cfg, fn):
961
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
962

    
963

    
964
def _GetNodeInstances(cfg, node_name):
965
  """Returns a list of all primary and secondary instances on a node.
966

967
  """
968

    
969
  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
970

    
971

    
972
def _GetNodePrimaryInstances(cfg, node_name):
973
  """Returns primary instances on a node.
974

975
  """
976
  return _GetNodeInstancesInner(cfg,
977
                                lambda inst: node_name == inst.primary_node)
978

    
979

    
980
def _GetNodeSecondaryInstances(cfg, node_name):
981
  """Returns secondary instances on a node.
982

983
  """
984
  return _GetNodeInstancesInner(cfg,
985
                                lambda inst: node_name in inst.secondary_nodes)
986

    
987

    
988
def _GetStorageTypeArgs(cfg, storage_type):
989
  """Returns the arguments for a storage type.
990

991
  """
992
  # Special case for file storage
993
  if storage_type == constants.ST_FILE:
994
    # storage.FileStorage wants a list of storage directories
995
    return [[cfg.GetFileStorageDir()]]
996

    
997
  return []
998

    
999

    
1000
def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
1001
  faulty = []
1002

    
1003
  for dev in instance.disks:
1004
    cfg.SetDiskID(dev, node_name)
1005

    
1006
  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
1007
  result.Raise("Failed to get disk status from node %s" % node_name,
1008
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
1009

    
1010
  for idx, bdev_status in enumerate(result.payload):
1011
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
1012
      faulty.append(idx)
1013

    
1014
  return faulty
1015

    
1016

    
1017
def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
1018
  """Check the sanity of iallocator and node arguments and use the
1019
  cluster-wide iallocator if appropriate.
1020

1021
  Check that at most one of (iallocator, node) is specified. If none is
1022
  specified, then the LU's opcode's iallocator slot is filled with the
1023
  cluster-wide default iallocator.
1024

1025
  @type iallocator_slot: string
1026
  @param iallocator_slot: the name of the opcode iallocator slot
1027
  @type node_slot: string
1028
  @param node_slot: the name of the opcode target node slot
1029

1030
  """
1031
  node = getattr(lu.op, node_slot, None)
1032
  iallocator = getattr(lu.op, iallocator_slot, None)
1033

    
1034
  if node is not None and iallocator is not None:
1035
    raise errors.OpPrereqError("Do not specify both, iallocator and node.",
1036
                               errors.ECODE_INVAL)
1037
  elif node is None and iallocator is None:
1038
    default_iallocator = lu.cfg.GetDefaultIAllocator()
1039
    if default_iallocator:
1040
      setattr(lu.op, iallocator_slot, default_iallocator)
1041
    else:
1042
      raise errors.OpPrereqError("No iallocator or node given and no"
1043
                                 " cluster-wide default iallocator found."
1044
                                 " Please specify either an iallocator or a"
1045
                                 " node, or set a cluster-wide default"
1046
                                 " iallocator.")
1047

    
1048

    
1049
class LUPostInitCluster(LogicalUnit):
1050
  """Logical unit for running hooks after cluster initialization.
1051

1052
  """
1053
  HPATH = "cluster-init"
1054
  HTYPE = constants.HTYPE_CLUSTER
1055

    
1056
  def BuildHooksEnv(self):
1057
    """Build hooks env.
1058

1059
    """
1060
    env = {"OP_TARGET": self.cfg.GetClusterName()}
1061
    mn = self.cfg.GetMasterNode()
1062
    return env, [], [mn]
1063

    
1064
  def Exec(self, feedback_fn):
1065
    """Nothing to do.
1066

1067
    """
1068
    return True
1069

    
1070

    
1071
class LUDestroyCluster(LogicalUnit):
1072
  """Logical unit for destroying the cluster.
1073

1074
  """
1075
  HPATH = "cluster-destroy"
1076
  HTYPE = constants.HTYPE_CLUSTER
1077

    
1078
  def BuildHooksEnv(self):
1079
    """Build hooks env.
1080

1081
    """
1082
    env = {"OP_TARGET": self.cfg.GetClusterName()}
1083
    return env, [], []
1084

    
1085
  def CheckPrereq(self):
1086
    """Check prerequisites.
1087

1088
    This checks whether the cluster is empty.
1089

1090
    Any errors are signaled by raising errors.OpPrereqError.
1091

1092
    """
1093
    master = self.cfg.GetMasterNode()
1094

    
1095
    nodelist = self.cfg.GetNodeList()
1096
    if len(nodelist) != 1 or nodelist[0] != master:
1097
      raise errors.OpPrereqError("There are still %d node(s) in"
1098
                                 " this cluster." % (len(nodelist) - 1),
1099
                                 errors.ECODE_INVAL)
1100
    instancelist = self.cfg.GetInstanceList()
1101
    if instancelist:
1102
      raise errors.OpPrereqError("There are still %d instance(s) in"
1103
                                 " this cluster." % len(instancelist),
1104
                                 errors.ECODE_INVAL)
1105

    
1106
  def Exec(self, feedback_fn):
1107
    """Destroys the cluster.
1108

1109
    """
1110
    master = self.cfg.GetMasterNode()
1111

    
1112
    # Run post hooks on master node before it's removed
1113
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
1114
    try:
1115
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
1116
    except:
1117
      # pylint: disable-msg=W0702
1118
      self.LogWarning("Errors occurred running hooks on %s" % master)
1119

    
1120
    result = self.rpc.call_node_stop_master(master, False)
1121
    result.Raise("Could not disable the master role")
1122

    
1123
    return master
1124

    
1125

    
1126
def _VerifyCertificate(filename):
1127
  """Verifies a certificate for LUVerifyCluster.
1128

1129
  @type filename: string
1130
  @param filename: Path to PEM file
1131

1132
  """
1133
  try:
1134
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1135
                                           utils.ReadFile(filename))
1136
  except Exception, err: # pylint: disable-msg=W0703
1137
    return (LUVerifyCluster.ETYPE_ERROR,
1138
            "Failed to load X509 certificate %s: %s" % (filename, err))
1139

    
1140
  (errcode, msg) = \
1141
    utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
1142
                                constants.SSL_CERT_EXPIRATION_ERROR)
1143

    
1144
  if msg:
1145
    fnamemsg = "While verifying %s: %s" % (filename, msg)
1146
  else:
1147
    fnamemsg = None
1148

    
1149
  if errcode is None:
1150
    return (None, fnamemsg)
1151
  elif errcode == utils.CERT_WARNING:
1152
    return (LUVerifyCluster.ETYPE_WARNING, fnamemsg)
1153
  elif errcode == utils.CERT_ERROR:
1154
    return (LUVerifyCluster.ETYPE_ERROR, fnamemsg)
1155

    
1156
  raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
1157

    
1158

    
1159
class LUVerifyCluster(LogicalUnit):
1160
  """Verifies the cluster status.
1161

1162
  """
1163
  HPATH = "cluster-verify"
1164
  HTYPE = constants.HTYPE_CLUSTER
1165
  _OP_PARAMS = [
1166
    ("skip_checks", ht.EmptyList,
1167
     ht.TListOf(ht.TElemOf(constants.VERIFY_OPTIONAL_CHECKS))),
1168
    ("verbose", False, ht.TBool),
1169
    ("error_codes", False, ht.TBool),
1170
    ("debug_simulate_errors", False, ht.TBool),
1171
    ]
1172
  REQ_BGL = False
1173

    
1174
  TCLUSTER = "cluster"
1175
  TNODE = "node"
1176
  TINSTANCE = "instance"
1177

    
1178
  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
1179
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
1180
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
1181
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
1182
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
1183
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
1184
  EINSTANCEFAULTYDISK = (TINSTANCE, "EINSTANCEFAULTYDISK")
1185
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
1186
  ENODEDRBD = (TNODE, "ENODEDRBD")
1187
  ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
1188
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
1189
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
1190
  ENODEHV = (TNODE, "ENODEHV")
1191
  ENODELVM = (TNODE, "ENODELVM")
1192
  ENODEN1 = (TNODE, "ENODEN1")
1193
  ENODENET = (TNODE, "ENODENET")
1194
  ENODEOS = (TNODE, "ENODEOS")
1195
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
1196
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
1197
  ENODERPC = (TNODE, "ENODERPC")
1198
  ENODESSH = (TNODE, "ENODESSH")
1199
  ENODEVERSION = (TNODE, "ENODEVERSION")
1200
  ENODESETUP = (TNODE, "ENODESETUP")
1201
  ENODETIME = (TNODE, "ENODETIME")
1202

    
1203
  ETYPE_FIELD = "code"
1204
  ETYPE_ERROR = "ERROR"
1205
  ETYPE_WARNING = "WARNING"
1206

    
1207
  class NodeImage(object):
1208
    """A class representing the logical and physical status of a node.
1209

1210
    @type name: string
1211
    @ivar name: the node name to which this object refers
1212
    @ivar volumes: a structure as returned from
1213
        L{ganeti.backend.GetVolumeList} (runtime)
1214
    @ivar instances: a list of running instances (runtime)
1215
    @ivar pinst: list of configured primary instances (config)
1216
    @ivar sinst: list of configured secondary instances (config)
1217
    @ivar sbp: dictionary of {secondary-node: list of instances} of all peers
1218
        of this node (config)
1219
    @ivar mfree: free memory, as reported by hypervisor (runtime)
1220
    @ivar dfree: free disk, as reported by the node (runtime)
1221
    @ivar offline: the offline status (config)
1222
    @type rpc_fail: boolean
1223
    @ivar rpc_fail: whether the RPC verify call failed (overall,
1224
        not whether the individual keys were correct) (runtime)
1225
    @type lvm_fail: boolean
1226
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
1227
    @type hyp_fail: boolean
1228
    @ivar hyp_fail: whether the RPC call didn't return the instance list
1229
    @type ghost: boolean
1230
    @ivar ghost: whether this is a known node or not (config)
1231
    @type os_fail: boolean
1232
    @ivar os_fail: whether the RPC call didn't return valid OS data
1233
    @type oslist: list
1234
    @ivar oslist: list of OSes as diagnosed by DiagnoseOS
1235
    @type vm_capable: boolean
1236
    @ivar vm_capable: whether the node can host instances
1237

1238
    """
1239
    def __init__(self, offline=False, name=None, vm_capable=True):
1240
      self.name = name
1241
      self.volumes = {}
1242
      self.instances = []
1243
      self.pinst = []
1244
      self.sinst = []
1245
      self.sbp = {}
1246
      self.mfree = 0
1247
      self.dfree = 0
1248
      self.offline = offline
1249
      self.vm_capable = vm_capable
1250
      self.rpc_fail = False
1251
      self.lvm_fail = False
1252
      self.hyp_fail = False
1253
      self.ghost = False
1254
      self.os_fail = False
1255
      self.oslist = {}
1256

    
1257
  def ExpandNames(self):
1258
    self.needed_locks = {
1259
      locking.LEVEL_NODE: locking.ALL_SET,
1260
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1261
    }
1262
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1263

    
1264
  def _Error(self, ecode, item, msg, *args, **kwargs):
1265
    """Format an error message.
1266

1267
    Based on the opcode's error_codes parameter, either format a
1268
    parseable error code, or a simpler error string.
1269

1270
    This must be called only from Exec and functions called from Exec.
1271

1272
    """
1273
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1274
    itype, etxt = ecode
1275
    # first complete the msg
1276
    if args:
1277
      msg = msg % args
1278
    # then format the whole message
1279
    if self.op.error_codes:
1280
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1281
    else:
1282
      if item:
1283
        item = " " + item
1284
      else:
1285
        item = ""
1286
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1287
    # and finally report it via the feedback_fn
1288
    self._feedback_fn("  - %s" % msg)
1289

    
1290
  def _ErrorIf(self, cond, *args, **kwargs):
1291
    """Log an error message if the passed condition is True.
1292

1293
    """
1294
    cond = bool(cond) or self.op.debug_simulate_errors
1295
    if cond:
1296
      self._Error(*args, **kwargs)
1297
    # do not mark the operation as failed for WARN cases only
1298
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1299
      self.bad = self.bad or cond
1300

    
1301
  def _VerifyNode(self, ninfo, nresult):
1302
    """Perform some basic validation on data returned from a node.
1303

1304
      - check the result data structure is well formed and has all the
1305
        mandatory fields
1306
      - check ganeti version
1307

1308
    @type ninfo: L{objects.Node}
1309
    @param ninfo: the node to check
1310
    @param nresult: the results from the node
1311
    @rtype: boolean
1312
    @return: whether overall this call was successful (and we can expect
1313
         reasonable values in the response)
1314

1315
    """
1316
    node = ninfo.name
1317
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1318

    
1319
    # main result, nresult should be a non-empty dict
1320
    test = not nresult or not isinstance(nresult, dict)
1321
    _ErrorIf(test, self.ENODERPC, node,
1322
                  "unable to verify node: no data returned")
1323
    if test:
1324
      return False
1325

    
1326
    # compares ganeti version
1327
    local_version = constants.PROTOCOL_VERSION
1328
    remote_version = nresult.get("version", None)
1329
    test = not (remote_version and
1330
                isinstance(remote_version, (list, tuple)) and
1331
                len(remote_version) == 2)
1332
    _ErrorIf(test, self.ENODERPC, node,
1333
             "connection to node returned invalid data")
1334
    if test:
1335
      return False
1336

    
1337
    test = local_version != remote_version[0]
1338
    _ErrorIf(test, self.ENODEVERSION, node,
1339
             "incompatible protocol versions: master %s,"
1340
             " node %s", local_version, remote_version[0])
1341
    if test:
1342
      return False
1343

    
1344
    # node seems compatible, we can actually try to look into its results
1345

    
1346
    # full package version
1347
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1348
                  self.ENODEVERSION, node,
1349
                  "software version mismatch: master %s, node %s",
1350
                  constants.RELEASE_VERSION, remote_version[1],
1351
                  code=self.ETYPE_WARNING)
1352

    
1353
    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1354
    if ninfo.vm_capable and isinstance(hyp_result, dict):
1355
      for hv_name, hv_result in hyp_result.iteritems():
1356
        test = hv_result is not None
1357
        _ErrorIf(test, self.ENODEHV, node,
1358
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1359

    
1360
    test = nresult.get(constants.NV_NODESETUP,
1361
                           ["Missing NODESETUP results"])
1362
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1363
             "; ".join(test))
1364

    
1365
    return True
1366

    
1367
  def _VerifyNodeTime(self, ninfo, nresult,
1368
                      nvinfo_starttime, nvinfo_endtime):
1369
    """Check the node time.
1370

1371
    @type ninfo: L{objects.Node}
1372
    @param ninfo: the node to check
1373
    @param nresult: the remote results for the node
1374
    @param nvinfo_starttime: the start time of the RPC call
1375
    @param nvinfo_endtime: the end time of the RPC call
1376

1377
    """
1378
    node = ninfo.name
1379
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1380

    
1381
    ntime = nresult.get(constants.NV_TIME, None)
1382
    try:
1383
      ntime_merged = utils.MergeTime(ntime)
1384
    except (ValueError, TypeError):
1385
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
1386
      return
1387

    
1388
    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
1389
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
1390
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
1391
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
1392
    else:
1393
      ntime_diff = None
1394

    
1395
    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
1396
             "Node time diverges by at least %s from master node time",
1397
             ntime_diff)
1398

    
1399
  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
1400
    """Check the node time.
1401

1402
    @type ninfo: L{objects.Node}
1403
    @param ninfo: the node to check
1404
    @param nresult: the remote results for the node
1405
    @param vg_name: the configured VG name
1406

1407
    """
1408
    if vg_name is None:
1409
      return
1410

    
1411
    node = ninfo.name
1412
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1413

    
1414
    # checks vg existence and size > 20G
1415
    vglist = nresult.get(constants.NV_VGLIST, None)
1416
    test = not vglist
1417
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1418
    if not test:
1419
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1420
                                            constants.MIN_VG_SIZE)
1421
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1422

    
1423
    # check pv names
1424
    pvlist = nresult.get(constants.NV_PVLIST, None)
1425
    test = pvlist is None
1426
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
1427
    if not test:
1428
      # check that ':' is not present in PV names, since it's a
1429
      # special character for lvcreate (denotes the range of PEs to
1430
      # use on the PV)
1431
      for _, pvname, owner_vg in pvlist:
1432
        test = ":" in pvname
1433
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
1434
                 " '%s' of VG '%s'", pvname, owner_vg)
1435

    
1436
  def _VerifyNodeNetwork(self, ninfo, nresult):
    """Check the node network connectivity.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    test = constants.NV_NODELIST not in nresult
    _ErrorIf(test, self.ENODESSH, node,
             "node hasn't returned node ssh connectivity data")
    if not test:
      if nresult[constants.NV_NODELIST]:
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
          _ErrorIf(True, self.ENODESSH, node,
                   "ssh communication with node '%s': %s", a_node, a_msg)

    test = constants.NV_NODENETTEST not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node tcp connectivity data")
    if not test:
      if nresult[constants.NV_NODENETTEST]:
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
        for anode in nlist:
          _ErrorIf(True, self.ENODENET, node,
                   "tcp communication with node '%s': %s",
                   anode, nresult[constants.NV_NODENETTEST][anode])

    test = constants.NV_MASTERIP not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node master IP reachability data")
    if not test:
      if not nresult[constants.NV_MASTERIP]:
        if node == self.master_node:
          msg = "the master node cannot reach the master IP (not configured?)"
        else:
          msg = "cannot reach the master IP"
        _ErrorIf(True, self.ENODENET, node, msg)

  def _VerifyInstance(self, instance, instanceconfig, node_image,
                      diskstatus):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      n_img = node_image[node]
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # ignore missing volumes on offline or broken nodes
        continue
      for volume in node_vol_should[node]:
        test = volume not in n_img.volumes
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
                 "volume %s missing on node %s", volume, node)

    if instanceconfig.admin_up:
      pri_img = node_image[node_current]
      test = instance not in pri_img.instances and not pri_img.offline
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
               "instance not running on its primary node %s",
               node_current)

    for node, n_img in node_image.items():
      if (not node == node_current):
        test = instance in n_img.instances
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
                 "instance should not run on node %s", node)

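    # flatten the per-node disk status map into (node name, disk status,
    # disk index) tuples, so the per-disk checks below stay simple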
    diskdata = [(nname, disk, idx)
                for (nname, disks) in diskstatus.items()
                for idx, disk in enumerate(disks)]

    for nname, bdev_status, idx in diskdata:
      _ErrorIf(not bdev_status,
               self.EINSTANCEFAULTYDISK, instance,
               "couldn't retrieve status for disk/%s on %s", idx, nname)
      _ErrorIf(bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY,
               self.EINSTANCEFAULTYDISK, instance,
               "disk/%s on %s is faulty", idx, nname)

  def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    @type reserved: L{ganeti.utils.FieldSet}
    @param reserved: a FieldSet of reserved volume names

    """
    for node, n_img in node_image.items():
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # skip non-healthy nodes
        continue
      for volume in n_img.volumes:
        test = ((node not in node_vol_should or
                volume not in node_vol_should[node]) and
                not reserved.Matches(volume))
        self._ErrorIf(test, self.ENODEORPHANLV, node,
                      "volume %s is unknown", volume)

  def _VerifyOrphanInstances(self, instancelist, node_image):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    for node, n_img in node_image.items():
      for o_inst in n_img.instances:
        test = o_inst not in instancelist
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
                      "instance %s on node %s should not exist", o_inst, node)

  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the
    instances it was primary for.

    """
    for node, n_img in node_image.items():
      # This code checks that every node which is now listed as
      # secondary has enough memory to host all instances it is
      # supposed to should a single other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well
      # as up ones, considering that even if they're down someone
      # might want to start them even in the event of a node failure.
      for prinode, instances in n_img.sbp.items():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        test = n_img.mfree < needed_mem
        self._ErrorIf(test, self.ENODEN1, node,
                      "not enough memory to accommodate"
                      " failovers should peer node %s fail", prinode)

  def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
                       master_files):
    """Verifies and computes the node required file checksums.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param master_files: list of files that only masters should have

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    remote_cksum = nresult.get(constants.NV_FILELIST, None)
    test = not isinstance(remote_cksum, dict)
    _ErrorIf(test, self.ENODEFILECHECK, node,
             "node hasn't returned file checksum data")
    if test:
      return

    for file_name in file_list:
      node_is_mc = ninfo.master_candidate
      must_have = (file_name not in master_files) or node_is_mc
      # missing
      test1 = file_name not in remote_cksum
      # invalid checksum
      test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
      # existing and good
      test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
      _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
               "file '%s' missing", file_name)
      _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
               "file '%s' has wrong checksum", file_name)
      # not candidate and this is not a must-have file
      _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
               "file '%s' should not exist on non master"
               " candidates (and the file is outdated)", file_name)
      # all good, except non-master/non-must have combination
      _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
               "file '%s' should not exist"
               " on non master candidates", file_name)

  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
                      drbd_map):
    """Verifies the node DRBD status.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param instanceinfo: the dict of instances
    @param drbd_helper: the configured DRBD usermode helper
    @param drbd_map: the DRBD map as returned by
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    if drbd_helper:
      helper_result = nresult.get(constants.NV_DRBDHELPER, None)
      test = (helper_result == None)
      _ErrorIf(test, self.ENODEDRBDHELPER, node,
               "no drbd usermode helper returned")
      if helper_result:
        status, payload = helper_result
        test = not status
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
                 "drbd usermode helper check unsuccessful: %s", payload)
        test = status and (payload != drbd_helper)
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
                 "wrong drbd usermode helper: %s", payload)

    # compute the DRBD minors
    node_drbd = {}
    for minor, instance in drbd_map[node].items():
      test = instance not in instanceinfo
      _ErrorIf(test, self.ECLUSTERCFG, None,
               "ghost instance '%s' in temporary DRBD map", instance)
        # ghost instance should not be running, but otherwise we
        # don't give double warnings (both ghost instance and
        # unallocated minor in use)
      if test:
        node_drbd[minor] = (instance, False)
      else:
        instance = instanceinfo[instance]
        node_drbd[minor] = (instance.name, instance.admin_up)

    # and now check them
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
    test = not isinstance(used_minors, (tuple, list))
    _ErrorIf(test, self.ENODEDRBD, node,
             "cannot parse drbd status file: %s", str(used_minors))
    if test:
      # we cannot check drbd status
      return

    for minor, (iname, must_exist) in node_drbd.items():
      test = minor not in used_minors and must_exist
      _ErrorIf(test, self.ENODEDRBD, node,
               "drbd minor %d of instance %s is not active", minor, iname)
    for minor in used_minors:
      test = minor not in node_drbd
      _ErrorIf(test, self.ENODEDRBD, node,
               "unallocated drbd minor %d is in use", minor)

  def _UpdateNodeOS(self, ninfo, nresult, nimg):
    """Builds the node OS structures.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    remote_os = nresult.get(constants.NV_OSLIST, None)
    test = (not isinstance(remote_os, list) or
            not compat.all(isinstance(v, list) and len(v) == 7
                           for v in remote_os))

    _ErrorIf(test, self.ENODEOS, node,
             "node hasn't returned valid OS data")

    nimg.os_fail = test

    if test:
      return

    os_dict = {}

    for (name, os_path, status, diagnose,
         variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:

      if name not in os_dict:
        os_dict[name] = []

      # parameters is a list of lists instead of list of tuples due to
      # JSON lacking a real tuple type, fix it:
      parameters = [tuple(v) for v in parameters]
      os_dict[name].append((os_path, status, diagnose,
                            set(variants), set(parameters), set(api_ver)))

    nimg.oslist = os_dict

  def _VerifyNodeOS(self, ninfo, nimg, base):
    """Verifies the node OS list.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nimg: the node image object
    @param base: the 'template' node we match against (e.g. from the master)

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"

    for os_name, os_data in nimg.oslist.items():
      assert os_data, "Empty OS status for OS %s?!" % os_name
      f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
      _ErrorIf(not f_status, self.ENODEOS, node,
               "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
      _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
               "OS '%s' has multiple entries (first one shadows the rest): %s",
               os_name, utils.CommaJoin([v[0] for v in os_data]))
      # this will be caught in the backend too
      _ErrorIf(compat.any(v >= constants.OS_API_V15 for v in f_api)
               and not f_var, self.ENODEOS, node,
               "OS %s with API at least %d does not declare any variant",
               os_name, constants.OS_API_V15)
      # comparisons with the 'base' image
      test = os_name not in base.oslist
      _ErrorIf(test, self.ENODEOS, node,
               "Extra OS %s not present on reference node (%s)",
               os_name, base.name)
      if test:
        continue
      assert base.oslist[os_name], "Base node has empty OS status?"
      _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
      if not b_status:
        # base OS is invalid, skipping
        continue
      for kind, a, b in [("API version", f_api, b_api),
                         ("variants list", f_var, b_var),
                         ("parameters", f_param, b_param)]:
        _ErrorIf(a != b, self.ENODEOS, node,
                 "OS %s %s differs from reference node %s: %s vs. %s",
                 kind, os_name, base.name,
                 utils.CommaJoin(a), utils.CommaJoin(b))

    # check any missing OSes
    missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
    _ErrorIf(missing, self.ENODEOS, node,
             "OSes present on reference node %s but missing on this node: %s",
             base.name, utils.CommaJoin(missing))

  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
    """Verifies and updates the node volume data.

    This function will update a L{NodeImage}'s internal structures
    with data from the remote call.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object
    @param vg_name: the configured VG name

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    nimg.lvm_fail = True
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
    if vg_name is None:
      pass
    elif isinstance(lvdata, basestring):
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
               utils.SafeEncode(lvdata))
    elif not isinstance(lvdata, dict):
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
    else:
      nimg.volumes = lvdata
      nimg.lvm_fail = False

  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
    """Verifies and updates the node instance list.

    If the listing was successful, then updates this node's instance
    list. Otherwise, it marks the RPC call as failed for the instance
    list key.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object

    """
    idata = nresult.get(constants.NV_INSTANCELIST, None)
    test = not isinstance(idata, list)
    self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
                  " (instancelist): %s", utils.SafeEncode(str(idata)))
    if test:
      nimg.hyp_fail = True
    else:
      nimg.instances = idata

  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
    """Verifies and computes a node information map

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object
    @param vg_name: the configured VG name

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # try to read free memory (from the hypervisor)
    hv_info = nresult.get(constants.NV_HVINFO, None)
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
    _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
    if not test:
      try:
        nimg.mfree = int(hv_info["memory_free"])
      except (ValueError, TypeError):
        _ErrorIf(True, self.ENODERPC, node,
                 "node returned invalid nodeinfo, check hypervisor")

    # FIXME: devise a free space model for file based instances as well
    if vg_name is not None:
      test = (constants.NV_VGLIST not in nresult or
              vg_name not in nresult[constants.NV_VGLIST])
      _ErrorIf(test, self.ENODELVM, node,
               "node didn't return data for the volume group '%s'"
               " - it is either missing or broken", vg_name)
      if not test:
        try:
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
        except (ValueError, TypeError):
          _ErrorIf(True, self.ENODERPC, node,
                   "node returned invalid LVM info, check LVM status")

  def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
    """Gets per-disk status information for all instances.

    @type nodelist: list of strings
    @param nodelist: Node names
    @type node_image: dict of (name, L{objects.Node})
    @param node_image: Node objects
    @type instanceinfo: dict of (name, L{objects.Instance})
    @param instanceinfo: Instance objects

    """
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    node_disks = {}
    node_disks_devonly = {}

    for nname in nodelist:
      disks = [(inst, disk)
               for instlist in [node_image[nname].pinst,
                                node_image[nname].sinst]
               for inst in instlist
               for disk in instanceinfo[inst].disks]

      if not disks:
        # No need to collect data
        continue

      node_disks[nname] = disks

      # Creating copies as SetDiskID below will modify the objects and that can
      # lead to incorrect data returned from nodes
      devonly = [dev.Copy() for (_, dev) in disks]

      for dev in devonly:
        self.cfg.SetDiskID(dev, nname)

      node_disks_devonly[nname] = devonly

    assert len(node_disks) == len(node_disks_devonly)

    # Collect data from all nodes with disks
    result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
                                                          node_disks_devonly)

    assert len(result) == len(node_disks)

    instdisk = {}

    for (nname, nres) in result.items():
      if nres.offline:
        # Ignore offline node
        continue

      disks = node_disks[nname]

      msg = nres.fail_msg
      _ErrorIf(msg, self.ENODERPC, nname,
               "while getting disk information: %s", nres.fail_msg)
      if msg:
        # No data from this node
        data = len(disks) * [None]
      else:
        data = nres.payload

      for ((inst, _), status) in zip(disks, data):
        instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)

    assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
                      len(nnames) <= len(instanceinfo[inst].all_nodes)
                      for inst, nnames in instdisk.items()
                      for nname, statuses in nnames.items())

    return instdisk

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks are run in the post phase only; if they fail,
    their output is logged in the verify output and the verification fails.

    """
    all_nodes = self.cfg.GetNodeList()
    env = {
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
      }
    for node in self.cfg.GetAllNodesInfo().values():
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())

    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    self.bad = False
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
    verbose = self.op.verbose
    self._feedback_fn = feedback_fn
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)

    # Check the cluster certificates
    for cert_filename in constants.ALL_CERT_FILES:
      (errcode, msg) = _VerifyCertificate(cert_filename)
      _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)

    vg_name = self.cfg.GetVGName()
    drbd_helper = self.cfg.GetDRBDHelper()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    cluster = self.cfg.GetClusterInfo()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
                        for iname in instancelist)
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    n_offline = 0 # Count of offline nodes
    n_drained = 0 # Count of nodes being drained
    node_vol_should = {}

    # FIXME: verify OS list
    # do local checksums
    master_files = [constants.CLUSTER_CONF_FILE]
    master_node = self.master_node = self.cfg.GetMasterNode()
    master_ip = self.cfg.GetMasterIP()

    file_names = ssconf.SimpleStore().GetFileList()
    file_names.extend(constants.ALL_CERT_FILES)
    file_names.extend(master_files)
    if cluster.modify_etc_hosts:
      file_names.append(constants.ETC_HOSTS)

    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    node_verify_param = {
      constants.NV_FILELIST: file_names,
      constants.NV_NODELIST: [node.name for node in nodeinfo
                              if not node.offline],
      constants.NV_HYPERVISOR: hypervisors,
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
                                  node.secondary_ip) for node in nodeinfo
                                 if not node.offline],
      constants.NV_INSTANCELIST: hypervisors,
      constants.NV_VERSION: None,
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
      constants.NV_NODESETUP: None,
      constants.NV_TIME: None,
      constants.NV_MASTERIP: (master_node, master_ip),
      constants.NV_OSLIST: None,
      constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
      }

    if vg_name is not None:
      node_verify_param[constants.NV_VGLIST] = None
      node_verify_param[constants.NV_LVLIST] = vg_name
      node_verify_param[constants.NV_PVLIST] = [vg_name]
      node_verify_param[constants.NV_DRBDLIST] = None

    if drbd_helper:
      node_verify_param[constants.NV_DRBDHELPER] = drbd_helper

    # Build our expected cluster state
    node_image = dict((node.name, self.NodeImage(offline=node.offline,
                                                 name=node.name,
                                                 vm_capable=node.vm_capable))
                      for node in nodeinfo)

    for instance in instancelist:
      inst_config = instanceinfo[instance]

      for nname in inst_config.all_nodes:
        if nname not in node_image:
          # ghost node
          gnode = self.NodeImage(name=nname)
          gnode.ghost = True
          node_image[nname] = gnode

      inst_config.MapLVsByNode(node_vol_should)

      pnode = inst_config.primary_node
      node_image[pnode].pinst.append(instance)

      for snode in inst_config.secondary_nodes:
        nimg = node_image[snode]
        nimg.sinst.append(instance)
        if pnode not in nimg.sbp:
          nimg.sbp[pnode] = []
        nimg.sbp[pnode].append(instance)

    # At this point, we have the in-memory data structures complete,
    # except for the runtime information, which we'll gather next

    # Due to the way our RPC system works, exact response times cannot be
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
    # time before and after executing the request, we can at least have a time
    # window.
    nvinfo_starttime = time.time()
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())
    nvinfo_endtime = time.time()

    all_drbd_map = self.cfg.ComputeDRBDMap()

    feedback_fn("* Gathering disk information (%s nodes)" % len(nodelist))
    instdisk = self._CollectDiskInfo(nodelist, node_image, instanceinfo)

    feedback_fn("* Verifying node status")

    refos_img = None

    for node_i in nodeinfo:
      node = node_i.name
      nimg = node_image[node]

      if node_i.offline:
        if verbose:
          feedback_fn("* Skipping offline node %s" % (node,))
        n_offline += 1
        continue

      if node == master_node:
        ntype = "master"
      elif node_i.master_candidate:
        ntype = "master candidate"
      elif node_i.drained:
        ntype = "drained"
        n_drained += 1
      else:
        ntype = "regular"
      if verbose:
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))

      msg = all_nvinfo[node].fail_msg
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
      if msg:
        nimg.rpc_fail = True
        continue

      nresult = all_nvinfo[node].payload

      nimg.call_ok = self._VerifyNode(node_i, nresult)
      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
      self._VerifyNodeNetwork(node_i, nresult)
      self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
                            master_files)

      if nimg.vm_capable:
        self._VerifyNodeLVM(node_i, nresult, vg_name)
        self._VerifyNodeDrbd(node_i, nresult, instanceinfo, drbd_helper,
                             all_drbd_map)

        self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
        self._UpdateNodeInstances(node_i, nresult, nimg)
        self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
        self._UpdateNodeOS(node_i, nresult, nimg)
        if not nimg.os_fail:
          if refos_img is None:
            refos_img = nimg
          self._VerifyNodeOS(node_i, nimg, refos_img)

    feedback_fn("* Verifying instance status")
    for instance in instancelist:
      if verbose:
        feedback_fn("* Verifying instance %s" % instance)
      inst_config = instanceinfo[instance]
      self._VerifyInstance(instance, inst_config, node_image,
                           instdisk[instance])
      inst_nodes_offline = []

      pnode = inst_config.primary_node
      pnode_img = node_image[pnode]
      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
               self.ENODERPC, pnode, "instance %s, connection to"
               " primary node failed", instance)

      if pnode_img.offline:
        inst_nodes_offline.append(pnode)

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if not inst_config.secondary_nodes:
        i_non_redundant.append(instance)
      _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
               instance, "instance has multiple secondary nodes: %s",
               utils.CommaJoin(inst_config.secondary_nodes),
               code=self.ETYPE_WARNING)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        s_img = node_image[snode]
        _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
                 "instance %s, connection to secondary node failed", instance)

        if s_img.offline:
          inst_nodes_offline.append(snode)

      # warn that the instance lives on offline nodes
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
               "instance lives on offline node(s) %s",
               utils.CommaJoin(inst_nodes_offline))
      # ... or ghost/non-vm_capable nodes
      for node in inst_config.all_nodes:
        _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
                 "instance lives on ghost node %s", node)
        _ErrorIf(not node_image[node].vm_capable, self.EINSTANCEBADNODE,
                 instance, "instance lives on non-vm_capable node %s", node)

    feedback_fn("* Verifying orphan volumes")
    reserved = utils.FieldSet(*cluster.reserved_lvs)
    self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)

    feedback_fn("* Verifying orphan instances")
    self._VerifyOrphanInstances(instancelist, node_image)

    if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
      feedback_fn("* Verifying N+1 Memory redundancy")
      self._VerifyNPlusOneMemory(node_image, instanceinfo)

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    if n_offline:
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)

    if n_drained:
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)

    return not self.bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result.

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      assert hooks_results, "invalid result from hooks"

      for node_name in hooks_results:
        res = hooks_results[node_name]
        msg = res.fail_msg
        test = msg and not res.offline
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
                      "Communication failure in hooks execution: %s", msg)
        if res.offline or msg:
          # No need to investigate payload if node is offline or gave an error.
          # override lu_result manually here, as _ErrorIf only
          # overrides self.bad
          lu_result = 1
          continue
        for script, hkr, output in res.payload:
          test = hkr == constants.HKR_FAIL
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
                        "Script %s failed, output:", script)
          if test:
            output = indent_re.sub('      ', output)
            feedback_fn("%s" % output)
            lu_result = 0

      return lu_result


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes)

    """
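    # the three names below alias the members of the "result" tuple, so
    # filling them in also fills in the value returned to the caller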
    result = res_nodes, res_instances, res_missing = {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_lv_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      node_res = node_lvs[node]
      if node_res.offline:
        continue
      msg = node_res.fail_msg
      if msg:
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
        res_nodes[node] = msg
        continue

      lvs = node_res.payload
      for lv_name, (_, _, lv_online) in lvs.items():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result


class LURepairDiskSizes(NoHooksLU):
  """Verifies the cluster disks sizes.

  """
  _OP_PARAMS = [("instances", ht.EmptyList, ht.TListOf(ht.TNonEmptyString))]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = _ExpandInstanceName(self.cfg, name)
        self.wanted_names.append(full_name)
      self.needed_locks = {
        locking.LEVEL_NODE: [],
        locking.LEVEL_INSTANCE: self.wanted_names,
        }
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    else:
      self.wanted_names = None
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: locking.ALL_SET,
        }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
      self._LockInstancesNodes(primary_only=True)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]

  def _EnsureChildSizes(self, disk):
    """Ensure children of the disk have the needed disk size.

    This is valid mainly for DRBD8 and fixes an issue where the
    children have smaller disk size.

    @param disk: an L{ganeti.objects.Disk} object

    """
    if disk.dev_type == constants.LD_DRBD8:
      assert disk.children, "Empty children for DRBD8?"
      fchild = disk.children[0]
      mismatch = fchild.size < disk.size
      if mismatch:
        self.LogInfo("Child disk has size %d, parent %d, fixing",
                     fchild.size, disk.size)
        fchild.size = disk.size

      # and we recurse on this child only, not on the metadev
      return self._EnsureChildSizes(fchild) or mismatch
    else:
      return False

  def Exec(self, feedback_fn):
    """Verify the size of cluster disks.

    """
    # TODO: check child disks too
    # TODO: check differences in size between primary/secondary nodes
    per_node_disks = {}
    for instance in self.wanted_instances:
      pnode = instance.primary_node
      if pnode not in per_node_disks:
        per_node_disks[pnode] = []
      for idx, disk in enumerate(instance.disks):
        per_node_disks[pnode].append((instance, idx, disk))

    changed = []
    for node, dskl in per_node_disks.items():
      newl = [v[2].Copy() for v in dskl]
      for dsk in newl:
        self.cfg.SetDiskID(dsk, node)
      result = self.rpc.call_blockdev_getsizes(node, newl)
      if result.fail_msg:
        self.LogWarning("Failure in blockdev_getsizes call to node"
                        " %s, ignoring", node)
        continue
      if len(result.data) != len(dskl):
        self.LogWarning("Invalid result from node %s, ignoring node results",
                        node)
        continue
      for ((instance, idx, disk), size) in zip(dskl, result.data):
        if size is None:
          self.LogWarning("Disk %d of instance %s did not return size"
                          " information, ignoring", idx, instance.name)
          continue
        if not isinstance(size, (int, long)):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " size information, ignoring", idx, instance.name)
          continue
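        # the node reports the size in bytes, while disk.size is kept in
        # MiB in the configuration, hence the shift before comparing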
        size = size >> 20
        if size != disk.size:
          self.LogInfo("Disk %d of instance %s has mismatched size,"
                       " correcting: recorded %d, actual %d", idx,
                       instance.name, disk.size, size)
          disk.size = size
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, size))
        if self._EnsureChildSizes(disk):
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, disk.size))
    return changed


class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_PARAMS = [("name", ht.NoDefault, ht.TNonEmptyString)]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    all_nodes = self.cfg.GetNodeList()
    return env, [mn], all_nodes

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = netutils.GetHostname(name=self.op.name,
                                    family=self.cfg.GetPrimaryIPFamily())

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed",
                                 errors.ECODE_INVAL)
    if new_ip != old_ip:
      if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network" %
                                   new_ip, errors.ECODE_NOTUNIQUE)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster, feedback_fn)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
    finally:
      result = self.rpc.call_node_start_master(master, False, False)
      msg = result.fail_msg
      if msg:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually: %s", msg)

    return clustername


class LUSetClusterParams(LogicalUnit):
2537
  """Change the parameters of the cluster.
2538

2539
  """
2540
  HPATH = "cluster-modify"
2541
  HTYPE = constants.HTYPE_CLUSTER
2542
  _OP_PARAMS = [
2543
    ("vg_name", None, ht.TMaybeString),
2544
    ("enabled_hypervisors", None,
2545
     ht.TOr(ht.TAnd(ht.TListOf(ht.TElemOf(constants.HYPER_TYPES)), ht.TTrue),
2546
            ht.TNone)),
2547
    ("hvparams", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
2548
                              ht.TNone)),
2549
    ("beparams", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
2550
                              ht.TNone)),
2551
    ("os_hvp", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
2552
                            ht.TNone)),
2553
    ("osparams", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
2554
                              ht.TNone)),
2555
    ("candidate_pool_size", None, ht.TOr(ht.TStrictPositiveInt, ht.TNone)),
2556
    ("uid_pool", None, ht.NoType),
2557
    ("add_uids", None, ht.NoType),
2558
    ("remove_uids", None, ht.NoType),
2559
    ("maintain_node_health", None, ht.TMaybeBool),
2560
    ("prealloc_wipe_disks", None, ht.TMaybeBool),
2561
    ("nicparams", None, ht.TOr(ht.TDict, ht.TNone)),
2562
    ("drbd_helper", None, ht.TOr(ht.TString, ht.TNone)),
2563
    ("default_iallocator", None, ht.TOr(ht.TString, ht.TNone)),
2564
    ("reserved_lvs", None, ht.TOr(ht.TListOf(ht.TNonEmptyString), ht.TNone)),
2565
    ("hidden_os", None, ht.TOr(ht.TListOf(\
2566
          ht.TAnd(ht.TList,
2567
                ht.TIsLength(2),
2568
                ht.TMap(lambda v: v[0], ht.TElemOf(constants.DDMS_VALUES)))),
2569
          ht.TNone)),
2570
    ("blacklisted_os", None, ht.TOr(ht.TListOf(\
2571
          ht.TAnd(ht.TList,
2572
                ht.TIsLength(2),
2573
                ht.TMap(lambda v: v[0], ht.TElemOf(constants.DDMS_VALUES)))),
2574
          ht.TNone)),
2575
    ]
2576
  REQ_BGL = False
2577

    
2578
  def CheckArguments(self):
2579
    """Check parameters
2580

2581
    """
2582
    if self.op.uid_pool:
2583
      uidpool.CheckUidPool(self.op.uid_pool)
2584

    
2585
    if self.op.add_uids:
2586
      uidpool.CheckUidPool(self.op.add_uids)
2587

    
2588
    if self.op.remove_uids:
2589
      uidpool.CheckUidPool(self.op.remove_uids)
2590

    
2591
  def ExpandNames(self):
2592
    # FIXME: in the future maybe other cluster params won't require checking on
2593
    # all nodes to be modified.
2594
    self.needed_locks = {
2595
      locking.LEVEL_NODE: locking.ALL_SET,
2596
    }
2597
    self.share_locks[locking.LEVEL_NODE] = 1
2598

    
2599
  def BuildHooksEnv(self):
2600
    """Build hooks env.
2601

2602
    """
2603
    env = {
2604
      "OP_TARGET": self.cfg.GetClusterName(),
2605
      "NEW_VG_NAME": self.op.vg_name,
2606
      }
2607
    mn = self.cfg.GetMasterNode()
2608
    return env, [mn], [mn]
2609

    
2610
  def CheckPrereq(self):
2611
    """Check prerequisites.
2612

2613
    This checks whether the given params don't conflict and
2614
    if the given volume group is valid.
2615

2616
    """
2617
    if self.op.vg_name is not None and not self.op.vg_name:
2618
      if self.cfg.HasAnyDiskOfType(constants.LD_LV):
2619
        raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
2620
                                   " instances exist", errors.ECODE_INVAL)
2621

    
2622
    if self.op.drbd_helper is not None and not self.op.drbd_helper:
2623
      if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
2624
        raise errors.OpPrereqError("Cannot disable drbd helper while"
2625
                                   " drbd-based instances exist",
2626
                                   errors.ECODE_INVAL)
2627

    
2628
    node_list = self.acquired_locks[locking.LEVEL_NODE]
2629

    
2630
    # if vg_name not None, checks given volume group on all nodes
2631
    if self.op.vg_name:
2632
      vglist = self.rpc.call_vg_list(node_list)
2633
      for node in node_list:
2634
        msg = vglist[node].fail_msg
2635
        if msg:
2636
          # ignoring down node
2637
          self.LogWarning("Error while gathering data on node %s"
2638
                          " (ignoring node): %s", node, msg)
2639
          continue
2640
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
2641
                                              self.op.vg_name,
2642
                                              constants.MIN_VG_SIZE)
2643
        if vgstatus:
2644
          raise errors.OpPrereqError("Error on node '%s': %s" %
2645
                                     (node, vgstatus), errors.ECODE_ENVIRON)
2646

    
2647
    if self.op.drbd_helper:
2648
      # checks given drbd helper on all nodes
2649
      helpers = self.rpc.call_drbd_helper(node_list)
2650
      for node in node_list:
2651
        ninfo = self.cfg.GetNodeInfo(node)
2652
        if ninfo.offline:
2653
          self.LogInfo("Not checking drbd helper on offline node %s", node)
2654
          continue
2655
        msg = helpers[node].fail_msg
2656
        if msg:
2657
          raise errors.OpPrereqError("Error checking drbd helper on node"
2658
                                     " '%s': %s" % (node, msg),
2659
                                     errors.ECODE_ENVIRON)
2660
        node_helper = helpers[node].payload
2661
        if node_helper != self.op.drbd_helper:
2662
          raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
2663
                                     (node, node_helper), errors.ECODE_ENVIRON)
2664

    
2665
    self.cluster = cluster = self.cfg.GetClusterInfo()
2666
    # validate params changes
2667
    if self.op.beparams:
2668
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
2669
      self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
2670

    
2671
    if self.op.nicparams:
2672
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
2673
      self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
2674
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
2675
      nic_errors = []
2676

    
2677
      # check all instances for consistency
2678
      for instance in self.cfg.GetAllInstancesInfo().values():
2679
        for nic_idx, nic in enumerate(instance.nics):
2680
          params_copy = copy.deepcopy(nic.nicparams)
2681
          params_filled = objects.FillDict(self.new_nicparams, params_copy)
2682

    
2683
          # check parameter syntax
2684
          try:
2685
            objects.NIC.CheckParameterSyntax(params_filled)
2686
          except errors.ConfigurationError, err:
2687
            nic_errors.append("Instance %s, nic/%d: %s" %
2688
                              (instance.name, nic_idx, err))
2689

    
2690
          # if we're moving instances to routed, check that they have an ip
2691
          target_mode = params_filled[constants.NIC_MODE]
2692
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
2693
            nic_errors.append("Instance %s, nic/%d: routed nick with no ip" %
2694
                              (instance.name, nic_idx))
2695
      if nic_errors:
2696
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
2697
                                   "\n".join(nic_errors))
2698

    
2699
    # hypervisor list/parameters
2700
    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
2701
    if self.op.hvparams:
2702
      for hv_name, hv_dict in self.op.hvparams.items():
2703
        if hv_name not in self.new_hvparams:
2704
          self.new_hvparams[hv_name] = hv_dict
2705
        else:
2706
          self.new_hvparams[hv_name].update(hv_dict)
2707

    
2708
    # os hypervisor parameters
2709
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
2710
    if self.op.os_hvp:
2711
      for os_name, hvs in self.op.os_hvp.items():
2712
        if os_name not in self.new_os_hvp:
2713
          self.new_os_hvp[os_name] = hvs
2714
        else:
2715
          for hv_name, hv_dict in hvs.items():
2716
            if hv_name not in self.new_os_hvp[os_name]:
2717
              self.new_os_hvp[os_name][hv_name] = hv_dict
2718
            else:
2719
              self.new_os_hvp[os_name][hv_name].update(hv_dict)
2720

    
2721
    # os parameters
2722
    self.new_osp = objects.FillDict(cluster.osparams, {})
2723
    if self.op.osparams:
2724
      for os_name, osp in self.op.osparams.items():
2725
        if os_name not in self.new_osp:
2726
          self.new_osp[os_name] = {}
2727

    
2728
        self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
2729
                                                  use_none=True)
2730

    
2731
        if not self.new_osp[os_name]:
2732
          # we removed all parameters
2733
          del self.new_osp[os_name]
2734
        else:
2735
          # check the parameter validity (remote check)
2736
          _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
2737
                         os_name, self.new_osp[os_name])
2738

    
2739
    # changes to the hypervisor list
2740
    if self.op.enabled_hypervisors is not None:
2741
      self.hv_list = self.op.enabled_hypervisors
2742
      for hv in self.hv_list:
2743
        # if the hypervisor doesn't already exist in the cluster
2744
        # hvparams, we initialize it to empty, and then (in both
2745
        # cases) we make sure to fill the defaults, as we might not
2746
        # have a complete defaults list if the hypervisor wasn't
2747
        # enabled before
2748
        if hv not in new_hvp:
2749
          new_hvp[hv] = {}
2750
        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
2751
        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
2752
    else:
2753
      self.hv_list = cluster.enabled_hypervisors
2754

    
2755
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
2756
      # either the enabled list has changed, or the parameters have, validate
2757
      for hv_name, hv_params in self.new_hvparams.items():
2758
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
2759
            (self.op.enabled_hypervisors and
2760
             hv_name in self.op.enabled_hypervisors)):
2761
          # either this is a new hypervisor, or its parameters have changed
2762
          hv_class = hypervisor.GetHypervisor(hv_name)
2763
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2764
          hv_class.CheckParameterSyntax(hv_params)
2765
          _CheckHVParams(self, node_list, hv_name, hv_params)
2766

    
2767
    if self.op.os_hvp:
2768
      # no need to check any newly-enabled hypervisors, since the
2769
      # defaults have already been checked in the above code-block
2770
      for os_name, os_hvp in self.new_os_hvp.items():
2771
        for hv_name, hv_params in os_hvp.items():
2772
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2773
          # we need to fill in the new os_hvp on top of the actual hv_p
2774
          cluster_defaults = self.new_hvparams.get(hv_name, {})
2775
          new_osp = objects.FillDict(cluster_defaults, hv_params)
2776
          hv_class = hypervisor.GetHypervisor(hv_name)
2777
          hv_class.CheckParameterSyntax(new_osp)
2778
          _CheckHVParams(self, node_list, hv_name, new_osp)
2779

    
2780
    if self.op.default_iallocator:
2781
      alloc_script = utils.FindFile(self.op.default_iallocator,
2782
                                    constants.IALLOCATOR_SEARCH_PATH,
2783
                                    os.path.isfile)
2784
      if alloc_script is None:
2785
        raise errors.OpPrereqError("Invalid default iallocator script '%s'"
2786
                                   " specified" % self.op.default_iallocator,
2787
                                   errors.ECODE_INVAL)
2788

    
2789
  def Exec(self, feedback_fn):
2790
    """Change the parameters of the cluster.
2791

2792
    """
2793
    if self.op.vg_name is not None:
2794
      new_volume = self.op.vg_name
2795
      if not new_volume:
2796
        new_volume = None
2797
      if new_volume != self.cfg.GetVGName():
2798
        self.cfg.SetVGName(new_volume)
2799
      else:
2800
        feedback_fn("Cluster LVM configuration already in desired"
2801
                    " state, not changing")
2802
    if self.op.drbd_helper is not None:
      new_helper = self.op.drbd_helper
      if not new_helper:
        new_helper = None
      if new_helper != self.cfg.GetDRBDHelper():
        self.cfg.SetDRBDHelper(new_helper)
      else:
        feedback_fn("Cluster DRBD helper already in desired state,"
                    " not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.os_hvp:
      self.cluster.os_hvp = self.new_os_hvp
    if self.op.enabled_hypervisors is not None:
      self.cluster.hvparams = self.new_hvparams
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
    if self.op.nicparams:
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
    if self.op.osparams:
      self.cluster.osparams = self.new_osp

    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
      # we need to update the pool size here, otherwise the save will fail
      _AdjustCandidatePool(self, [])

    if self.op.maintain_node_health is not None:
      self.cluster.maintain_node_health = self.op.maintain_node_health

    if self.op.prealloc_wipe_disks is not None:
      self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks

    if self.op.add_uids is not None:
      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)

    if self.op.remove_uids is not None:
      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)

    if self.op.uid_pool is not None:
      self.cluster.uid_pool = self.op.uid_pool

    if self.op.default_iallocator is not None:
      self.cluster.default_iallocator = self.op.default_iallocator

    if self.op.reserved_lvs is not None:
      self.cluster.reserved_lvs = self.op.reserved_lvs

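    # helper for applying DDM_ADD/DDM_REMOVE modification lists to the
    # hidden_os/blacklisted_os cluster attributes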
    def helper_os(aname, mods, desc):
      desc += " OS list"
      lst = getattr(self.cluster, aname)
      for key, val in mods:
        if key == constants.DDM_ADD:
          if val in lst:
            feedback_fn("OS %s already in %s, ignoring", val, desc)
          else:
            lst.append(val)
        elif key == constants.DDM_REMOVE:
          if val in lst:
            lst.remove(val)
          else:
            feedback_fn("OS %s not found in %s, ignoring", val, desc)
        else:
          raise errors.ProgrammerError("Invalid modification '%s'" % key)

    if self.op.hidden_os:
      helper_os("hidden_os", self.op.hidden_os, "hidden")

    if self.op.blacklisted_os:
      helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")

    self.cfg.Update(self.cluster, feedback_fn)


def _UploadHelper(lu, nodes, fname):
  """Helper for uploading a file and showing warnings.

  """
  if os.path.exists(fname):
    result = lu.rpc.call_upload_file(nodes, fname)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (fname, to_node, msg))
        lu.proc.LogWarning(msg)


def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  @param lu: calling logical unit
  @param additional_nodes: list of nodes not in the config to distribute to
  @type additional_vm: boolean
  @param additional_vm: whether the additional nodes are vm-capable or not

  """
  # 1. Gather target nodes
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
  dist_nodes = lu.cfg.GetOnlineNodeList()
  nvm_nodes = lu.cfg.GetNonVmCapableNodeList()
  vm_nodes = [name for name in dist_nodes if name not in nvm_nodes]
  if additional_nodes is not None:
    dist_nodes.extend(additional_nodes)
    if additional_vm:
      vm_nodes.extend(additional_nodes)
  if myself.name in dist_nodes:
    dist_nodes.remove(myself.name)
  if myself.name in vm_nodes:
    vm_nodes.remove(myself.name)

  # 2. Gather files to distribute
  dist_files = set([constants.ETC_HOSTS,
                    constants.SSH_KNOWN_HOSTS_FILE,
                    constants.RAPI_CERT_FILE,
                    constants.RAPI_USERS_FILE,
                    constants.CONFD_HMAC_KEY,
                    constants.CLUSTER_DOMAIN_SECRET_FILE,
                   ])

  vm_files = set()
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
  for hv_name in enabled_hypervisors:
    hv_class = hypervisor.GetHypervisor(hv_name)
    vm_files.update(hv_class.GetAncillaryFiles())

  # 3. Perform the files upload
  for fname in dist_files:
    _UploadHelper(lu, dist_nodes, fname)
  for fname in vm_files:
    _UploadHelper(lu, vm_nodes, fname)


class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
    _RedistributeAncillaryFiles(self)


def _WaitForSync(lu, instance, disks=None, oneshot=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks or disks is not None and not disks:
    return True

  disks = _ExpandCheckDisks(instance, disks)

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in disks:
    lu.cfg.SetDiskID(dev, node)

  # TODO: Convert to utils.Retry

  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.payload
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                           node, disks[i].iv_name)
        continue

      cumul_degraded = (cumul_degraded or
                        (mstat.is_degraded and mstat.sync_percent is None))
      if mstat.sync_percent is not None:
        done = False
        if mstat.estimated_time is not None:
          rem_time = ("%s remaining (estimated)" %
                      utils.FormatSeconds(mstat.estimated_time))
          max_time = mstat.estimated_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (disks[i].iv_name, mstat.sync_percent, rem_time))

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
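  # report success only if no disk ended up degraded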
  return not cumul_degraded


def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)

  result = True

  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      if ldisk:
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
      else:
        result = result and not rstats.payload.is_degraded

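  # recurse into the child devices (e.g. the LVs backing a DRBD disk)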
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_PARAMS = [
    _POutputFields,
    ("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
    ]
  REQ_BGL = False
  _HID = "hidden"
  _BLK = "blacklisted"
  _VLD = "valid"
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", _VLD, "node_status", "variants",
                                   "parameters", "api_versions", _HID, _BLK)

  def CheckArguments(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported",
                                 errors.ECODE_INVAL)

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def ExpandNames(self):
    # Lock all nodes, in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    self.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  @staticmethod
  def _DiagnoseByOS(rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another
        map, with nodes as keys and tuples of (path, status, diagnose,
        variants, parameters, api_versions) as values, eg::

          {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
                                     (/srv/..., False, "invalid api")],
                           "node2": [(/srv/..., True, "", [], [])]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].fail_msg]
    for node_name, nr in rlist.items():
      if nr.fail_msg or not nr.payload:
        continue
      for (name, path, status, diagnose, variants,
           params, api_versions) in nr.payload:
        if name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[name] = {}
          for nname in good_nodes:
            all_os[name][nname] = []
        # convert params from [name, help] to (name, help)
        params = [tuple(v) for v in params]
        all_os[name][node_name].append((path, status, diagnose,
                                        variants, params, api_versions))
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    pol = self._DiagnoseByOS(node_data)
    output = []
    cluster = self.cfg.GetClusterInfo()

    for os_name in utils.NiceSort(pol.keys()):
      os_data = pol[os_name]
      row = []
      valid = True
      (variants, params, api_versions) = null_state = (set(), set(), set())
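      # an OS is considered valid only if it is valid on every node;
      # variants, parameters and API versions are reduced to the
      # subset common to all nodes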
      for idx, osl in enumerate(os_data.values()):
        valid = bool(valid and osl and osl[0][1])
        if not valid:
          (variants, params, api_versions) = null_state
          break
        node_variants, node_params, node_api = osl[0][3:6]
        if idx == 0: # first entry
          variants = set(node_variants)
          params = set(node_params)
          api_versions = set(node_api)
        else: # keep consistency
          variants.intersection_update(node_variants)
          params.intersection_update(node_params)
          api_versions.intersection_update(node_api)

      is_hid = os_name in cluster.hidden_os
      is_blk = os_name in cluster.blacklisted_os
      if ((self._HID not in self.op.output_fields and is_hid) or
          (self._BLK not in self.op.output_fields and is_blk) or
          (self._VLD not in self.op.output_fields and not valid)):
        continue

      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == self._VLD:
          val = valid
        elif field == "node_status":
          # this is just a copy of the dict
          val = {}
          for node_name, nos_list in os_data.items():
            val[node_name] = nos_list
        elif field == "variants":
          val = utils.NiceSort(list(variants))
        elif field == "parameters":
          val = list(params)
        elif field == "api_versions":
          val = list(api_versions)
        elif field == self._HID:
          val = is_hid
        elif field == self._BLK:
          val = is_blk
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output


class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_PARAMS = [
    _PNodeName,
    ]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    try:
      all_nodes.remove(self.op.node_name)
    except ValueError:
      logging.warning("Node %s which is about to be removed not found"
                      " in the all nodes list", self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    node = self.cfg.GetNodeInfo(self.op.node_name)
    assert node is not None

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.",
                                 errors.ECODE_INVAL)

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name,
                                   errors.ECODE_INVAL)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self, exceptions=[node.name])
    self.context.RemoveNode(node.name)

    # Run post hooks on the node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
    except:
      # pylint: disable-msg=W0702
      self.LogWarning("Errors occurred running hooks on %s" % node.name)

    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
    msg = result.fail_msg
    if msg:
      self.LogWarning("Errors encountered on the remote node while leaving"
                      " the cluster: %s", msg)

    # Remove node from our /etc/hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      master_node = self.cfg.GetMasterNode()
      result = self.rpc.call_etc_hosts_modify(master_node,
                                              constants.ETC_HOSTS_REMOVE,
                                              node.name, None)
      result.Raise("Can't update hosts file with new host data")
      _RedistributeAncillaryFiles(self)


class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  # pylint: disable-msg=W0142
  _OP_PARAMS = [
    _POutputFields,
    ("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
    ("use_locking", False, ht.TBool),
    ]
  REQ_BGL = False

  _SIMPLE_FIELDS = ["name", "serial_no", "ctime", "mtime", "uuid",
                    "master_candidate", "offline", "drained",
                    "master_capable", "vm_capable"]

  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  _FIELDS_STATIC = utils.FieldSet(*[
    "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "master",
    "role"] + _SIMPLE_FIELDS
    )

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.fail_msg and nodeinfo.payload:
          nodeinfo = nodeinfo.payload
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
            }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      inst_data = self.cfg.GetAllInstancesInfo()

      for inst in inst_data.values():
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field in self._SIMPLE_FIELDS:
          val = getattr(node, field)
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "master":
          val = node.name == master_node
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        elif field == "role":
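          # role letters: M = master, C = master candidate, D = drained,
          # O = offline, R = regular node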
          if node.name == master_node:
            val = "M"
          elif node.master_candidate:
            val = "C"
          elif node.drained:
            val = "D"
          elif node.offline:
            val = "O"
          else:
            val = "R"
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output


class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_PARAMS = [
    ("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
    ("output_fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString)),
    ]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.acquired_locks[locking.LEVEL_NODE]
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      nresult = volumes[node]
      if nresult.offline:
        continue
      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
        continue

      node_vols = nresult.payload[:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output


class LUQueryNodeStorage(NoHooksLU):
  """Logical unit for getting information on storage units on node(s).

  """
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
  _OP_PARAMS = [
    ("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
    ("storage_type", ht.NoDefault, _CheckStorageType),
    ("output_fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString)),
    ("name", None, ht.TMaybeString),
    ]
  REQ_BGL = False

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

    # Always get name to sort by
    if constants.SF_NAME in self.op.output_fields:
      fields = self.op.output_fields[:]
    else:
      fields = [constants.SF_NAME] + self.op.output_fields

    # Never ask for node or type as it's only known to the LU
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
      while extra in fields:
        fields.remove(extra)

    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
    name_idx = field_idx[constants.SF_NAME]

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    data = self.rpc.call_storage_list(self.nodes,
                                      self.op.storage_type, st_args,
                                      self.op.name, fields)

    result = []

    for node in utils.NiceSort(self.nodes):
      nresult = data[node]
      if nresult.offline:
        continue

      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
        continue

      rows = dict([(row[name_idx], row) for row in nresult.payload])

      for name in utils.NiceSort(rows.keys()):
        row = rows[name]

        out = []

        for field in self.op.output_fields:
          if field == constants.SF_NODE:
            val = node
          elif field == constants.SF_TYPE:
            val = self.op.storage_type
          elif field in field_idx:
            val = row[field_idx[field]]
          else:
            raise errors.ParameterError(field)

          out.append(val)

        result.append(out)

    return result


class LUModifyNodeStorage(NoHooksLU):
  """Logical unit for modifying a storage volume on a node.

  """
  _OP_PARAMS = [
    _PNodeName,
    ("storage_type", ht.NoDefault, _CheckStorageType),
    ("name", ht.NoDefault, ht.TNonEmptyString),
    ("changes", ht.NoDefault, ht.TDict),
    ]
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type

    try:
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
    except KeyError:
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " modified" % storage_type,
                                 errors.ECODE_INVAL)

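    # only the fields declared modifiable for this storage type may be changed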
    diff = set(self.op.changes.keys()) - modifiable
    if diff:
      raise errors.OpPrereqError("The following fields can not be modified for"
                                 " storage units of type '%s': %r" %
                                 (storage_type, list(diff)),
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_name,
      }

  def Exec(self, feedback_fn):
    """Modifies a storage unit on the given node.

    """
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_modify(self.op.node_name,
                                          self.op.storage_type, st_args,
                                          self.op.name, self.op.changes)
    result.Raise("Failed to modify storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))


class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_PARAMS = [
    _PNodeName,
    ("primary_ip", None, ht.NoType),
    ("secondary_ip", None, ht.TMaybeString),
    ("readd", False, ht.TBool),
    ("group", None, ht.TMaybeString),
    ("master_capable", None, ht.TMaybeBool),
    ("vm_capable", None, ht.TMaybeBool),
    ]
  _NFLAGS = ["master_capable", "vm_capable"]

  def CheckArguments(self):
    self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
    # validate/normalize the node name
    self.hostname = netutils.GetHostname(name=self.op.node_name,
                                         family=self.primary_ip_family)
    self.op.node_name = self.hostname.name
    if self.op.readd and self.op.group:
      raise errors.OpPrereqError("Cannot pass a node group when a node is"
                                 " being readded", errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      "MASTER_CAPABLE": str(self.op.master_capable),
      "VM_CAPABLE": str(self.op.vm_capable),
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signaled by raising errors.OpPrereqError.

    """
    cfg = self.cfg
    hostname = self.hostname
    node = hostname.name
    primary_ip = self.op.primary_ip = hostname.ip
    if self.op.secondary_ip is None:
      if self.primary_ip_family == netutils.IP6Address.family:
        raise errors.OpPrereqError("When using an IPv6 primary address, a valid"
                                   " IPv4 address must be given as secondary",
                                   errors.ECODE_INVAL)
      self.op.secondary_ip = primary_ip

    secondary_ip = self.op.secondary_ip
    if not netutils.IP4Address.IsValid(secondary_ip):
      raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
                                 " address" % secondary_ip, errors.ECODE_INVAL)

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node, errors.ECODE_EXISTS)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
                                 errors.ECODE_NOENT)

    self.changed_primary_ip = False

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if existing_node.secondary_ip != secondary_ip:
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before",
                                     errors.ECODE_INVAL)
        if existing_node.primary_ip != primary_ip:
          self.changed_primary_ip = True

        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name,
                                   errors.ECODE_NOTUNIQUE)

    # After this 'if' block, None is no longer a valid value for the
    # _capable op attributes
    if self.op.readd:
      old_node = self.cfg.GetNodeInfo(node)
      assert old_node is not None, "Can't retrieve locked node %s" % node
      for attr in self._NFLAGS:
        if getattr(self.op, attr) is None:
          setattr(self.op, attr, getattr(old_node, attr))
    else:
      for attr in self._NFLAGS:
        if getattr(self.op, attr) is None:
          setattr(self.op, attr, True)

    if self.op.readd and not self.op.vm_capable:
      pri, sec = cfg.GetNodeInstances(node)
      if pri or sec:
        raise errors.OpPrereqError("Node %s being re-added with vm_capable"
                                   " flag set to false, but it already holds"
                                   " instances" % node,
                                   errors.ECODE_STATE)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one",
                                   errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one",
                                   errors.ECODE_INVAL)

    # checks reachability
    if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping",
                                 errors.ECODE_ENVIRON)

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port",
                                   errors.ECODE_ENVIRON)

    if self.op.readd:
      exceptions = [node]
    else:
      exceptions = []

    if self.op.master_capable:
      self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
    else:
      self.master_candidate = False

    if self.op.readd:
      self.new_node = old_node
    else:
      node_group = cfg.LookupNodeGroup(self.op.group)
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False,
                                   group=node_group)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # for re-adds, reset the offline/drained/master-candidate flags;
    # we need to reset here, otherwise offline would prevent RPC calls
    # later in the procedure; this also means that if the re-add
    # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
      self.LogInfo("Readding a node, the offline/drained flags were reset")
      # if we demote the node, we do cleanup later in the procedure
      new_node.master_candidate = self.master_candidate
      if self.changed_primary_ip:
        new_node.primary_ip = self.op.primary_ip

    # copy the master/vm_capable flags
    for attr in self._NFLAGS:
      setattr(new_node, attr, getattr(self.op, attr))

    # notify the user about any possible mc promotion
    if new_node.master_candidate:
      self.LogInfo("Node will be a master candidate")

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise("Can't get version information from node %s" % node)
    if constants.PROTOCOL_VERSION == result.payload:
      logging.info("Communication to node %s fine, sw version %s match",
                   node, result.payload)
    else:
      raise errors.OpExecError("Version mismatch master version %s,"
                               " node version %s" %
                               (constants.PROTOCOL_VERSION, result.payload))

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      master_node = self.cfg.GetMasterNode()
      result = self.rpc.call_etc_hosts_modify(master_node,
                                              constants.ETC_HOSTS_ADD,
                                              self.hostname.name,
                                              self.hostname.ip)
      result.Raise("Can't update hosts file with new host data")

    if new_node.secondary_ip != new_node.primary_ip:
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
                   prereq=True, ecode=errors.ECODE_ENVIRON)
      if not result.payload:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      constants.NV_NODELIST: [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
      if nl_payload:
        for failed in nl_payload:
          feedback_fn("ssh/hostname verification failed"
                      " (checking from %s): %s" %
                      (verifier, nl_payload[failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    if self.op.readd:
      _RedistributeAncillaryFiles(self)
      self.context.ReaddNode(new_node)
      # make sure we redistribute the config
      self.cfg.Update(new_node, feedback_fn)
      # and make sure the new node will not have old files around
      if not new_node.master_candidate:
        result = self.rpc.call_node_demote_from_mc(new_node.name)
        msg = result.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself from master"
                          " candidate status: %s" % msg)
    else:
      _RedistributeAncillaryFiles(self, additional_nodes=[node],
                                  additional_vm=self.op.vm_capable)
      self.context.AddNode(new_node, self.proc.GetECId())


class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
      to the node role (as _ROLE_*)
  @cvar _R2F: a dictionary from node role to tuples of flags
  @cvar _FLAGS: a list of attribute names corresponding to the flags

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_PARAMS = [
    _PNodeName,
    ("master_candidate", None, ht.TMaybeBool),
    ("offline", None, ht.TMaybeBool),
    ("drained", None, ht.TMaybeBool),
    ("auto_promote", False, ht.TBool),
    ("master_capable", None, ht.TMaybeBool),
    ("vm_capable", None, ht.TMaybeBool),
    _PForce,
    ]
  REQ_BGL = False
  (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
  _F2R = {
    (True, False, False): _ROLE_CANDIDATE,
    (False, True, False): _ROLE_DRAINED,
    (False, False, True): _ROLE_OFFLINE,
    (False, False, False): _ROLE_REGULAR,
    }
  _R2F = dict((v, k) for k, v in _F2R.items())
  _FLAGS = ["master_candidate", "drained", "offline"]

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
                self.op.master_capable, self.op.vm_capable]
    if all_mods.count(None) == len(all_mods):
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)
    if all_mods.count(True) > 1:
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time",
                                 errors.ECODE_INVAL)

    # Boolean value that tells us whether we might be demoting from MC
    self.might_demote = (self.op.master_candidate == False or
                         self.op.offline == True or
                         self.op.drained == True or
                         self.op.master_capable == False)

    self.lock_all = self.op.auto_promote and self.might_demote

  def ExpandNames(self):
    if self.lock_all:
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    else:
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      "MASTER_CAPABLE": str(self.op.master_capable),
      "VM_CAPABLE": str(self.op.vm_capable),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if (self.op.master_candidate is not None or
        self.op.drained is not None or
        self.op.offline is not None):
      # we can't change the master's node flags
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master role can be changed"
                                   " only via master-failover",
                                   errors.ECODE_INVAL)

    if self.op.master_candidate and not node.master_capable:
      raise errors.OpPrereqError("Node %s is not master capable, cannot make"
                                 " it a master candidate" % node.name,
                                 errors.ECODE_STATE)

    if self.op.vm_capable == False:
      (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
      if ipri or isec:
        raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
                                   " the vm_capable flag" % node.name,
                                   errors.ECODE_STATE)

    if node.master_candidate and self.might_demote and not self.lock_all:
      assert not self.op.auto_promote, "auto-promote set but lock_all not"
      # check if after removing the current node, we're missing master
      # candidates
      (mc_remaining, mc_should, _) = \
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
      if mc_remaining < mc_should:
        raise errors.OpPrereqError("Not enough master candidates, please"
                                   " pass auto_promote to allow promotion",
                                   errors.ECODE_STATE)

    self.old_flags = old_flags = (node.master_candidate,
                                  node.drained, node.offline)
    assert old_flags in self._F2R, "Un-handled old flags  %s" % str(old_flags)
    self.old_role = old_role = self._F2R[old_flags]

    # Check for ineffective changes
    for attr in self._FLAGS:
      if (getattr(self.op, attr) == False and getattr(node, attr) == False):
        self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
        setattr(self.op, attr, None)

    # Past this point, any flag change to False means a transition
    # away from the respective state, as only real changes are kept

    # If we're being deofflined/drained, we'll MC ourself if needed
    if (self.op.drained == False or self.op.offline == False or
        (self.op.master_capable and not node.master_capable)):
      if _DecideSelfPromotion(self):
        self.op.master_candidate = True
        self.LogInfo("Auto-promoting node to master candidate")

    # If we're no longer master capable, we'll demote ourselves from MC
    if self.op.master_capable == False and node.master_candidate:
      self.LogInfo("Demoting from master candidate")
      self.op.master_candidate = False

    # Compute new role
    assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
    if self.op.master_candidate:
      new_role = self._ROLE_CANDIDATE
    elif self.op.drained:
      new_role = self._ROLE_DRAINED
    elif self.op.offline:
      new_role = self._ROLE_OFFLINE
    elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
      # False is still in new flags, which means we're un-setting (the
      # only) True flag
      new_role = self._ROLE_REGULAR
    else: # no new flags, nothing, keep old role
      new_role = old_role

    self.new_role = new_role

    if old_role == self._ROLE_OFFLINE and new_role != old_role:
      # Trying to transition out of offline status
      result = self.rpc.call_version([node.name])[node.name]
      if result.fail_msg:
        raise errors.OpPrereqError("Node %s is being de-offlined but fails"
                                   " to report its version: %s" %
                                   (node.name, result.fail_msg),
                                   errors.ECODE_STATE)
      else:
        self.LogWarning("Transitioning node from offline to online state"
                        " without using re-add. Please make sure the node"
                        " is healthy!")

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node
    old_role = self.old_role
    new_role = self.new_role

    result = []

    for attr in ["master_capable", "vm_capable"]:
      val = getattr(self.op, attr)
      if val is not None:
        setattr(node, attr, val)
        result.append((attr, str(val)))

    if new_role != old_role:
      # Tell the node to demote itself, if no longer MC and not offline
      if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
        msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself: %s", msg)

      new_flags = self._R2F[new_role]
      for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
        if of != nf:
          result.append((desc, str(nf)))
      (node.master_candidate, node.drained, node.offline) = new_flags

      # we locked all nodes, we adjust the CP before updating this node
      if self.lock_all:
        _AdjustCandidatePool(self, [node.name])

    # this will trigger configuration file update, if needed
    self.cfg.Update(node, feedback_fn)

    # this will trigger job queue propagation or cleanup if the mc
    # flag changed
    if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
      self.context.ReaddNode(node)

    return result


class LUPowercycleNode(NoHooksLU):
  """Powercycles a node.

  """
  _OP_PARAMS = [
    _PNodeName,
    _PForce,
    ]
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
      raise errors.OpPrereqError("The node is the master and the force"
                                 " parameter was not set",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    """Locking for PowercycleNode.

    This is a last-resort option and shouldn't block on other
    jobs. Therefore, we grab no locks.

    """
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Reboots a node.

    """
    result = self.rpc.call_node_powercycle(self.op.node_name,
                                           self.cfg.GetHypervisorType())
    result.Raise("Failed to schedule the reboot")
    return result.payload


class LUQueryClusterInfo(NoHooksLU):
4205
  """Query cluster configuration.
4206

4207
  """
4208
  REQ_BGL = False
4209

    
4210
  def ExpandNames(self):
4211
    self.needed_locks = {}
4212

    
4213
  def Exec(self, feedback_fn):
4214
    """Return cluster config.
4215

4216
    """
4217
    cluster = self.cfg.GetClusterInfo()
4218
    os_hvp = {}
4219

    
4220
    # Filter just for enabled hypervisors
4221
    for os_name, hv_dict in cluster.os_hvp.items():
4222
      os_hvp[os_name] = {}
4223
      for hv_name, hv_params in hv_dict.items():
4224
        if hv_name in cluster.enabled_hypervisors:
4225
          os_hvp[os_name][hv_name] = hv_params
4226

    
4227
    # Convert ip_family to ip_version
4228
    primary_ip_version = constants.IP4_VERSION
4229
    if cluster.primary_ip_family == netutils.IP6Address.family:
4230
      primary_ip_version = constants.IP6_VERSION
4231

    
4232
    result = {
4233
      "software_version": constants.RELEASE_VERSION,
4234
      "protocol_version": constants.PROTOCOL_VERSION,
4235
      "config_version": constants.CONFIG_VERSION,
4236
      "os_api_version": max(constants.OS_API_VERSIONS),
4237
      "export_version": constants.EXPORT_VERSION,
4238
      "architecture": (platform.architecture()[0], platform.machine()),
4239
      "name": cluster.cluster_name,
4240
      "master": cluster.master_node,
4241
      "default_hypervisor": cluster.enabled_hypervisors[0],
4242
      "enabled_hypervisors": cluster.enabled_hypervisors,
4243
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
4244
                        for hypervisor_name in cluster.enabled_hypervisors]),
4245
      "os_hvp": os_hvp,
4246
      "beparams": cluster.beparams,
4247
      "osparams": cluster.osparams,
4248
      "nicparams": cluster.nicparams,
4249
      "candidate_pool_size": cluster.candidate_pool_size,
4250
      "master_netdev": cluster.master_netdev,
4251
      "volume_group_name": cluster.volume_group_name,
4252
      "drbd_usermode_helper": cluster.drbd_usermode_helper,
4253
      "file_storage_dir": cluster.file_storage_dir,
4254
      "maintain_node_health": cluster.maintain_node_health,
4255
      "ctime": cluster.ctime,
4256
      "mtime": cluster.mtime,
4257
      "uuid": cluster.uuid,
4258
      "tags": list(cluster.GetTags()),
4259
      "uid_pool": cluster.uid_pool,
4260
      "default_iallocator": cluster.default_iallocator,
4261
      "reserved_lvs": cluster.reserved_lvs,
4262
      "primary_ip_version": primary_ip_version,
4263
      "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
4264
      }
4265

    
4266
    return result
4267

    
4268

    
4269
class LUQueryConfigValues(NoHooksLU):
4270
  """Return configuration values.
4271

4272
  """
4273
  _OP_PARAMS = [_POutputFields]
4274
  REQ_BGL = False
4275
  _FIELDS_DYNAMIC = utils.FieldSet()
4276
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
4277
                                  "watcher_pause", "volume_group_name")
4278

    
4279
  def CheckArguments(self):
4280
    _CheckOutputFields(static=self._FIELDS_STATIC,
4281
                       dynamic=self._FIELDS_DYNAMIC,
4282
                       selected=self.op.output_fields)
4283

    
4284
  def ExpandNames(self):
4285
    self.needed_locks = {}
4286

    
4287
  def Exec(self, feedback_fn):
4288
    """Dump a representation of the cluster config to the standard output.
4289

4290
    """
4291
    values = []
4292
    for field in self.op.output_fields:
4293
      if field == "cluster_name":
4294
        entry = self.cfg.GetClusterName()
4295
      elif field == "master_node":
4296
        entry = self.cfg.GetMasterNode()
4297
      elif field == "drain_flag":
4298
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
4299
      elif field == "watcher_pause":
4300
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
4301
      elif field == "volume_group_name":
4302
        entry = self.cfg.GetVGName()
4303
      else:
4304
        raise errors.ParameterError(field)
4305
      values.append(entry)
4306
    return values
4307

    
4308

    
4309
class LUActivateInstanceDisks(NoHooksLU):
4310
  """Bring up an instance's disks.
4311

4312
  """
4313
  _OP_PARAMS = [
4314
    _PInstanceName,
4315
    ("ignore_size", False, ht.TBool),
4316
    ]
4317
  REQ_BGL = False
4318

    
4319
  def ExpandNames(self):
4320
    self._ExpandAndLockInstance()
4321
    self.needed_locks[locking.LEVEL_NODE] = []
4322
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4323

    
4324
  def DeclareLocks(self, level):
4325
    if level == locking.LEVEL_NODE:
4326
      self._LockInstancesNodes()
4327

    
4328
  def CheckPrereq(self):
4329
    """Check prerequisites.
4330

4331
    This checks that the instance is in the cluster.
4332

4333
    """
4334
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4335
    assert self.instance is not None, \
4336
      "Cannot retrieve locked instance %s" % self.op.instance_name
4337
    _CheckNodeOnline(self, self.instance.primary_node)
4338

    
4339
  def Exec(self, feedback_fn):
4340
    """Activate the disks.
4341

4342
    """
4343
    disks_ok, disks_info = \
4344
              _AssembleInstanceDisks(self, self.instance,
4345
                                     ignore_size=self.op.ignore_size)
4346
    if not disks_ok:
4347
      raise errors.OpExecError("Cannot activate block devices")
4348

    
4349
    return disks_info
4350

    
4351

    
4352
def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
4353
                           ignore_size=False):
4354
  """Prepare the block devices for an instance.
4355

4356
  This sets up the block devices on all nodes.
4357

4358
  @type lu: L{LogicalUnit}
4359
  @param lu: the logical unit on whose behalf we execute
4360
  @type instance: L{objects.Instance}
4361
  @param instance: the instance for whose disks we assemble
4362
  @type disks: list of L{objects.Disk} or None
4363
  @param disks: which disks to assemble (or all, if None)
4364
  @type ignore_secondaries: boolean
4365
  @param ignore_secondaries: if true, errors on secondary nodes
4366
      won't result in an error return from the function
4367
  @type ignore_size: boolean
4368
  @param ignore_size: if true, the current known size of the disk
4369
      will not be used during the disk activation, useful for cases
4370
      when the size is wrong
4371
  @return: False if the operation failed, otherwise a list of
4372
      (host, instance_visible_name, node_visible_name)
4373
      with the mapping from node devices to instance devices
4374

4375
  """
4376
  device_info = []
4377
  disks_ok = True
4378
  iname = instance.name
4379
  disks = _ExpandCheckDisks(instance, disks)
4380

    
4381
  # With the two passes mechanism we try to reduce the window of
4382
  # opportunity for the race condition of switching DRBD to primary
4383
  # before handshaking occured, but we do not eliminate it
4384

    
4385
  # The proper fix would be to wait (with some limits) until the
4386
  # connection has been made and drbd transitions from WFConnection
4387
  # into any other network-connected state (Connected, SyncTarget,
4388
  # SyncSource, etc.)
4389

    
4390
  # 1st pass, assemble on all nodes in secondary mode
4391
  for inst_disk in disks:
4392
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
4393
      if ignore_size:
4394
        node_disk = node_disk.Copy()
4395
        node_disk.UnsetSize()
4396
      lu.cfg.SetDiskID(node_disk, node)
4397
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
4398
      msg = result.fail_msg
4399
      if msg:
4400
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
4401
                           " (is_primary=False, pass=1): %s",
4402
                           inst_disk.iv_name, node, msg)
4403
        if not ignore_secondaries:
4404
          disks_ok = False
4405

    
4406
  # FIXME: race condition on drbd migration to primary
4407

    
4408
  # 2nd pass, do only the primary node
4409
  for inst_disk in disks:
4410
    dev_path = None
4411

    
4412
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
4413
      if node != instance.primary_node:
4414
        continue
4415
      if ignore_size:
4416
        node_disk = node_disk.Copy()
4417
        node_disk.UnsetSize()
4418
      lu.cfg.SetDiskID(node_disk, node)
4419
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
4420
      msg = result.fail_msg
4421
      if msg:
4422
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
4423
                           " (is_primary=True, pass=2): %s",
4424
                           inst_disk.iv_name, node, msg)
4425
        disks_ok = False
4426
      else:
4427
        dev_path = result.payload
4428

    
4429
    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
4430

    
4431
  # leave the disks configured for the primary node
4432
  # this is a workaround that would be fixed better by
4433
  # improving the logical/physical id handling
4434
  for disk in disks:
4435
    lu.cfg.SetDiskID(disk, instance.primary_node)
4436

    
4437
  return disks_ok, device_info
4438

    
4439

    
4440
def _StartInstanceDisks(lu, instance, force):
4441
  """Start the disks of an instance.
4442

4443
  """
4444
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
4445
                                           ignore_secondaries=force)
4446
  if not disks_ok:
4447
    _ShutdownInstanceDisks(lu, instance)
4448
    if force is not None and not force:
4449
      lu.proc.LogWarning("", hint="If the message above refers to a"
4450
                         " secondary node,"
4451
                         " you can retry the operation using '--force'.")
4452
    raise errors.OpExecError("Disk consistency error")
4453

    
4454

    
4455
class LUDeactivateInstanceDisks(NoHooksLU):
4456
  """Shutdown an instance's disks.
4457

4458
  """
4459
  _OP_PARAMS = [
4460
    _PInstanceName,
4461
    ]
4462
  REQ_BGL = False
4463

    
4464
  def ExpandNames(self):
4465
    self._ExpandAndLockInstance()
4466
    self.needed_locks[locking.LEVEL_NODE] = []
4467
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4468

    
4469
  def DeclareLocks(self, level):
4470
    if level == locking.LEVEL_NODE:
4471
      self._LockInstancesNodes()
4472

    
4473
  def CheckPrereq(self):
4474
    """Check prerequisites.
4475

4476
    This checks that the instance is in the cluster.
4477

4478
    """
4479
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4480
    assert self.instance is not None, \
4481
      "Cannot retrieve locked instance %s" % self.op.instance_name
4482

    
4483
  def Exec(self, feedback_fn):
4484
    """Deactivate the disks
4485

4486
    """
4487
    instance = self.instance
4488
    _SafeShutdownInstanceDisks(self, instance)
4489

    
4490

    
4491
def _SafeShutdownInstanceDisks(lu, instance, disks=None):
4492
  """Shutdown block devices of an instance.
4493

4494
  This function checks if an instance is running, before calling
4495
  _ShutdownInstanceDisks.
4496

4497
  """
4498
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
4499
  _ShutdownInstanceDisks(lu, instance, disks=disks)
4500

    
4501

    
4502
def _ExpandCheckDisks(instance, disks):
4503
  """Return the instance disks selected by the disks list
4504

4505
  @type disks: list of L{objects.Disk} or None
4506
  @param disks: selected disks
4507
  @rtype: list of L{objects.Disk}
4508
  @return: selected instance disks to act on
4509

4510
  """
4511
  if disks is None:
4512
    return instance.disks
4513
  else:
4514
    if not set(disks).issubset(instance.disks):
4515
      raise errors.ProgrammerError("Can only act on disks belonging to the"
4516
                                   " target instance")
4517
    return disks
4518

    
4519

    
4520
def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
4521
  """Shutdown block devices of an instance.
4522

4523
  This does the shutdown on all nodes of the instance.
4524

4525
  If the ignore_primary is false, errors on the primary node are
4526
  ignored.
4527

4528
  """
4529
  all_result = True
4530
  disks = _ExpandCheckDisks(instance, disks)
4531

    
4532
  for disk in disks:
4533
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
4534
      lu.cfg.SetDiskID(top_disk, node)
4535
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
4536
      msg = result.fail_msg
4537
      if msg:
4538
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
4539
                      disk.iv_name, node, msg)
4540
        if not ignore_primary or node != instance.primary_node:
4541
          all_result = False
4542
  return all_result
4543

    
4544

    
4545
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
4546
  """Checks if a node has enough free memory.
4547

4548
  This function check if a given node has the needed amount of free
4549
  memory. In case the node has less memory or we cannot get the
4550
  information from the node, this function raise an OpPrereqError
4551
  exception.
4552

4553
  @type lu: C{LogicalUnit}
4554
  @param lu: a logical unit from which we get configuration data
4555
  @type node: C{str}
4556
  @param node: the node to check
4557
  @type reason: C{str}
4558
  @param reason: string to use in the error message
4559
  @type requested: C{int}
4560
  @param requested: the amount of memory in MiB to check for
4561
  @type hypervisor_name: C{str}
4562
  @param hypervisor_name: the hypervisor to ask for memory stats
4563
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
4564
      we cannot check the node
4565

4566
  """
4567
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
4568
  nodeinfo[node].Raise("Can't get data from node %s" % node,
4569
                       prereq=True, ecode=errors.ECODE_ENVIRON)
4570
  free_mem = nodeinfo[node].payload.get('memory_free', None)
4571
  if not isinstance(free_mem, int):
4572
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
4573
                               " was '%s'" % (node, free_mem),
4574
                               errors.ECODE_ENVIRON)
4575
  if requested > free_mem:
4576
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
4577
                               " needed %s MiB, available %s MiB" %
4578
                               (node, reason, requested, free_mem),
4579
                               errors.ECODE_NORES)
4580

    
4581

    
4582
def _CheckNodesFreeDisk(lu, nodenames, requested):
4583
  """Checks if nodes have enough free disk space in the default VG.
4584

4585
  This function check if all given nodes have the needed amount of
4586
  free disk. In case any node has less disk or we cannot get the
4587
  information from the node, this function raise an OpPrereqError
4588
  exception.
4589

4590
  @type lu: C{LogicalUnit}
4591
  @param lu: a logical unit from which we get configuration data
4592
  @type nodenames: C{list}
4593
  @param nodenames: the list of node names to check
4594
  @type requested: C{int}
4595
  @param requested: the amount of disk in MiB to check for
4596
  @raise errors.OpPrereqError: if the node doesn't have enough disk, or
4597
      we cannot check the node
4598

4599
  """
4600
  nodeinfo = lu.rpc.call_node_info(nodenames, lu.cfg.GetVGName(),
4601
                                   lu.cfg.GetHypervisorType())
4602
  for node in nodenames:
4603
    info = nodeinfo[node]
4604
    info.Raise("Cannot get current information from node %s" % node,
4605
               prereq=True, ecode=errors.ECODE_ENVIRON)
4606
    vg_free = info.payload.get("vg_free", None)
4607
    if not isinstance(vg_free, int):
4608
      raise errors.OpPrereqError("Can't compute free disk space on node %s,"
4609
                                 " result was '%s'" % (node, vg_free),
4610
                                 errors.ECODE_ENVIRON)
4611
    if requested > vg_free:
4612
      raise errors.OpPrereqError("Not enough disk space on target node %s:"
4613
                                 " required %d MiB, available %d MiB" %
4614
                                 (node, requested, vg_free),
4615
                                 errors.ECODE_NORES)
4616

    
4617

    
4618
class LUStartupInstance(LogicalUnit):
4619
  """Starts an instance.
4620

4621
  """
4622
  HPATH = "instance-start"
4623
  HTYPE = constants.HTYPE_INSTANCE
4624
  _OP_PARAMS = [
4625
    _PInstanceName,
4626
    _PForce,
4627
    _PIgnoreOfflineNodes,
4628
    ("hvparams", ht.EmptyDict, ht.TDict),
4629
    ("beparams", ht.EmptyDict, ht.TDict),
4630
    ]
4631
  REQ_BGL = False
4632

    
4633
  def CheckArguments(self):
4634
    # extra beparams
4635
    if self.op.beparams:
4636
      # fill the beparams dict
4637
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
4638

    
4639
  def ExpandNames(self):
4640
    self._ExpandAndLockInstance()
4641

    
4642
  def BuildHooksEnv(self):
4643
    """Build hooks env.
4644

4645
    This runs on master, primary and secondary nodes of the instance.
4646

4647
    """
4648
    env = {
4649
      "FORCE": self.op.force,
4650
      }
4651
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4652
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4653
    return env, nl, nl
4654

    
4655
  def CheckPrereq(self):
4656
    """Check prerequisites.
4657

4658
    This checks that the instance is in the cluster.
4659

4660
    """
4661
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4662
    assert self.instance is not None, \
4663
      "Cannot retrieve locked instance %s" % self.op.instance_name
4664

    
4665
    # extra hvparams
4666
    if self.op.hvparams:
4667
      # check hypervisor parameter syntax (locally)
4668
      cluster = self.cfg.GetClusterInfo()
4669
      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
4670
      filled_hvp = cluster.FillHV(instance)
4671
      filled_hvp.update(self.op.hvparams)
4672
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
4673
      hv_type.CheckParameterSyntax(filled_hvp)
4674
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
4675

    
4676
    self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
4677

    
4678
    if self.primary_offline and self.op.ignore_offline_nodes:
4679
      self.proc.LogWarning("Ignoring offline primary node")
4680

    
4681
      if self.op.hvparams or self.op.beparams:
4682
        self.proc.LogWarning("Overridden parameters are ignored")
4683
    else:
4684
      _CheckNodeOnline(self, instance.primary_node)
4685

    
4686
      bep = self.cfg.GetClusterInfo().FillBE(instance)
4687

    
4688
      # check bridges existence
4689
      _CheckInstanceBridgesExist(self, instance)
4690

    
4691
      remote_info = self.rpc.call_instance_info(instance.primary_node,
4692
                                                instance.name,
4693
                                                instance.hypervisor)
4694
      remote_info.Raise("Error checking node %s" % instance.primary_node,
4695
                        prereq=True, ecode=errors.ECODE_ENVIRON)
4696
      if not remote_info.payload: # not running already
4697
        _CheckNodeFreeMemory(self, instance.primary_node,
4698
                             "starting instance %s" % instance.name,
4699
                             bep[constants.BE_MEMORY], instance.hypervisor)
4700

    
4701
  def Exec(self, feedback_fn):
4702
    """Start the instance.
4703

4704
    """
4705
    instance = self.instance
4706
    force = self.op.force
4707

    
4708
    self.cfg.MarkInstanceUp(instance.name)
4709

    
4710
    if self.primary_offline:
4711
      assert self.op.ignore_offline_nodes
4712
      self.proc.LogInfo("Primary node offline, marked instance as started")
4713
    else:
4714
      node_current = instance.primary_node
4715

    
4716
      _StartInstanceDisks(self, instance, force)
4717

    
4718
      result = self.rpc.call_instance_start(node_current, instance,
4719
                                            self.op.hvparams, self.op.beparams)
4720
      msg = result.fail_msg
4721
      if msg:
4722
        _ShutdownInstanceDisks(self, instance)
4723
        raise errors.OpExecError("Could not start instance: %s" % msg)
4724

    
4725

    
4726
class LURebootInstance(LogicalUnit):
4727
  """Reboot an instance.
4728

4729
  """
4730
  HPATH = "instance-reboot"
4731
  HTYPE = constants.HTYPE_INSTANCE
4732
  _OP_PARAMS = [
4733
    _PInstanceName,
4734
    ("ignore_secondaries", False, ht.TBool),
4735
    ("reboot_type", ht.NoDefault, ht.TElemOf(constants.REBOOT_TYPES)),
4736
    _PShutdownTimeout,
4737
    ]
4738
  REQ_BGL = False
4739

    
4740
  def ExpandNames(self):
4741
    self._ExpandAndLockInstance()
4742

    
4743
  def BuildHooksEnv(self):
4744
    """Build hooks env.
4745

4746
    This runs on master, primary and secondary nodes of the instance.
4747

4748
    """
4749
    env = {
4750
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
4751
      "REBOOT_TYPE": self.op.reboot_type,
4752
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
4753
      }
4754
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4755
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4756
    return env, nl, nl
4757

    
4758
  def CheckPrereq(self):
4759
    """Check prerequisites.
4760

4761
    This checks that the instance is in the cluster.
4762

4763
    """
4764
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4765
    assert self.instance is not None, \
4766
      "Cannot retrieve locked instance %s" % self.op.instance_name
4767

    
4768
    _CheckNodeOnline(self, instance.primary_node)
4769

    
4770
    # check bridges existence
4771
    _CheckInstanceBridgesExist(self, instance)
4772

    
4773
  def Exec(self, feedback_fn):
4774
    """Reboot the instance.
4775

4776
    """
4777
    instance = self.instance
4778
    ignore_secondaries = self.op.ignore_secondaries
4779
    reboot_type = self.op.reboot_type
4780

    
4781
    node_current = instance.primary_node
4782

    
4783
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
4784
                       constants.INSTANCE_REBOOT_HARD]:
4785
      for disk in instance.disks:
4786
        self.cfg.SetDiskID(disk, node_current)
4787
      result = self.rpc.call_instance_reboot(node_current, instance,
4788
                                             reboot_type,
4789
                                             self.op.shutdown_timeout)
4790
      result.Raise("Could not reboot instance")
4791
    else:
4792
      result = self.rpc.call_instance_shutdown(node_current, instance,
4793
                                               self.op.shutdown_timeout)
4794
      result.Raise("Could not shutdown instance for full reboot")
4795
      _ShutdownInstanceDisks(self, instance)
4796
      _StartInstanceDisks(self, instance, ignore_secondaries)
4797
      result = self.rpc.call_instance_start(node_current, instance, None, None)
4798
      msg = result.fail_msg
4799
      if msg:
4800
        _ShutdownInstanceDisks(self, instance)
4801
        raise errors.OpExecError("Could not start instance for"
4802
                                 " full reboot: %s" % msg)
4803

    
4804
    self.cfg.MarkInstanceUp(instance.name)
4805

    
4806

    
4807
class LUShutdownInstance(LogicalUnit):
4808
  """Shutdown an instance.
4809

4810
  """
4811
  HPATH = "instance-stop"
4812
  HTYPE = constants.HTYPE_INSTANCE
4813
  _OP_PARAMS = [
4814
    _PInstanceName,
4815
    _PIgnoreOfflineNodes,
4816
    ("timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT, ht.TPositiveInt),
4817
    ]
4818
  REQ_BGL = False
4819

    
4820
  def ExpandNames(self):
4821
    self._ExpandAndLockInstance()
4822

    
4823
  def BuildHooksEnv(self):
4824
    """Build hooks env.
4825

4826
    This runs on master, primary and secondary nodes of the instance.
4827

4828
    """
4829
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4830
    env["TIMEOUT"] = self.op.timeout
4831
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4832
    return env, nl, nl
4833

    
4834
  def CheckPrereq(self):
4835
    """Check prerequisites.
4836

4837
    This checks that the instance is in the cluster.
4838

4839
    """
4840
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4841
    assert self.instance is not None, \
4842
      "Cannot retrieve locked instance %s" % self.op.instance_name
4843

    
4844
    self.primary_offline = \
4845
      self.cfg.GetNodeInfo(self.instance.primary_node).offline
4846

    
4847
    if self.primary_offline and self.op.ignore_offline_nodes:
4848
      self.proc.LogWarning("Ignoring offline primary node")
4849
    else:
4850
      _CheckNodeOnline(self, self.instance.primary_node)
4851

    
4852
  def Exec(self, feedback_fn):
4853
    """Shutdown the instance.
4854

4855
    """
4856
    instance = self.instance
4857
    node_current = instance.primary_node
4858
    timeout = self.op.timeout
4859

    
4860
    self.cfg.MarkInstanceDown(instance.name)
4861

    
4862
    if self.primary_offline:
4863
      assert self.op.ignore_offline_nodes
4864
      self.proc.LogInfo("Primary node offline, marked instance as stopped")
4865
    else:
4866
      result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
4867
      msg = result.fail_msg
4868
      if msg:
4869
        self.proc.LogWarning("Could not shutdown instance: %s" % msg)
4870

    
4871
      _ShutdownInstanceDisks(self, instance)
4872

    
4873

    
4874
class LUReinstallInstance(LogicalUnit):
4875
  """Reinstall an instance.
4876

4877
  """
4878
  HPATH = "instance-reinstall"
4879
  HTYPE = constants.HTYPE_INSTANCE
4880
  _OP_PARAMS = [
4881
    _PInstanceName,
4882
    ("os_type", None, ht.TMaybeString),
4883
    ("force_variant", False, ht.TBool),
4884
    ("osparams", None, ht.TOr(ht.TDict, ht.TNone)),
4885
    ]
4886
  REQ_BGL = False
4887

    
4888
  def ExpandNames(self):
4889
    self._ExpandAndLockInstance()
4890

    
4891
  def BuildHooksEnv(self):
4892
    """Build hooks env.
4893

4894
    This runs on master, primary and secondary nodes of the instance.
4895

4896
    """
4897
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4898
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4899
    return env, nl, nl
4900

    
4901
  def CheckPrereq(self):
4902
    """Check prerequisites.
4903

4904
    This checks that the instance is in the cluster and is not running.
4905

4906
    """
4907
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4908
    assert instance is not None, \
4909
      "Cannot retrieve locked instance %s" % self.op.instance_name
4910
    _CheckNodeOnline(self, instance.primary_node)
4911

    
4912
    if instance.disk_template == constants.DT_DISKLESS:
4913
      raise errors.OpPrereqError("Instance '%s' has no disks" %
4914
                                 self.op.instance_name,
4915
                                 errors.ECODE_INVAL)
4916
    _CheckInstanceDown(self, instance, "cannot reinstall")
4917

    
4918
    if self.op.os_type is not None:
4919
      # OS verification
4920
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
4921
      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
4922
      instance_os = self.op.os_type
4923
    else:
4924
      instance_os = instance.os
4925

    
4926
    nodelist = list(instance.all_nodes)
4927

    
4928
    if self.op.osparams:
4929
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
4930
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
4931
      self.os_inst = i_osdict # the new dict (without defaults)
4932
    else:
4933
      self.os_inst = None
4934

    
4935
    self.instance = instance
4936

    
4937
  def Exec(self, feedback_fn):
4938
    """Reinstall the instance.
4939

4940
    """
4941
    inst = self.instance
4942

    
4943
    if self.op.os_type is not None:
4944
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
4945
      inst.os = self.op.os_type
4946
      # Write to configuration
4947
      self.cfg.Update(inst, feedback_fn)
4948

    
4949
    _StartInstanceDisks(self, inst, None)
4950
    try:
4951
      feedback_fn("Running the instance OS create scripts...")
4952
      # FIXME: pass debug option from opcode to backend
4953
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
4954
                                             self.op.debug_level,
4955
                                             osparams=self.os_inst)
4956
      result.Raise("Could not install OS for instance %s on node %s" %
4957
                   (inst.name, inst.primary_node))
4958
    finally:
4959
      _ShutdownInstanceDisks(self, inst)
4960

    
4961

    
4962
class LURecreateInstanceDisks(LogicalUnit):
4963
  """Recreate an instance's missing disks.
4964

4965
  """
4966
  HPATH = "instance-recreate-disks"
4967
  HTYPE = constants.HTYPE_INSTANCE
4968
  _OP_PARAMS = [
4969
    _PInstanceName,
4970
    ("disks", ht.EmptyList, ht.TListOf(ht.TPositiveInt)),
4971
    ]
4972
  REQ_BGL = False
4973

    
4974
  def ExpandNames(self):
4975
    self._ExpandAndLockInstance()
4976

    
4977
  def BuildHooksEnv(self):
4978
    """Build hooks env.
4979

4980
    This runs on master, primary and secondary nodes of the instance.
4981

4982
    """
4983
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4984
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4985
    return env, nl, nl
4986

    
4987
  def CheckPrereq(self):
4988
    """Check prerequisites.
4989

4990
    This checks that the instance is in the cluster and is not running.
4991

4992
    """
4993
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4994
    assert instance is not None, \
4995
      "Cannot retrieve locked instance %s" % self.op.instance_name
4996
    _CheckNodeOnline(self, instance.primary_node)
4997

    
4998
    if instance.disk_template == constants.DT_DISKLESS:
4999
      raise errors.OpPrereqError("Instance '%s' has no disks" %
5000
                                 self.op.instance_name, errors.ECODE_INVAL)
5001
    _CheckInstanceDown(self, instance, "cannot recreate disks")
5002

    
5003
    if not self.op.disks:
5004
      self.op.disks = range(len(instance.disks))
5005
    else:
5006
      for idx in self.op.disks:
5007
        if idx >= len(instance.disks):
5008
          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
5009
                                     errors.ECODE_INVAL)
5010

    
5011
    self.instance = instance
5012

    
5013
  def Exec(self, feedback_fn):
5014
    """Recreate the disks.
5015

5016
    """
5017
    to_skip = []
5018
    for idx, _ in enumerate(self.instance.disks):
5019
      if idx not in self.op.disks: # disk idx has not been passed in
5020
        to_skip.append(idx)
5021
        continue
5022

    
5023
    _CreateDisks(self, self.instance, to_skip=to_skip)
5024

    
5025

    
5026
class LURenameInstance(LogicalUnit):
5027
  """Rename an instance.
5028

5029
  """
5030
  HPATH = "instance-rename"
5031
  HTYPE = constants.HTYPE_INSTANCE
5032
  _OP_PARAMS = [
5033
    _PInstanceName,
5034
    ("new_name", ht.NoDefault, ht.TNonEmptyString),
5035
    ("ip_check", False, ht.TBool),
5036
    ("name_check", True, ht.TBool),
5037
    ]
5038

    
5039
  def CheckArguments(self):
5040
    """Check arguments.
5041

5042
    """
5043
    if self.op.ip_check and not self.op.name_check:
5044
      # TODO: make the ip check more flexible and not depend on the name check
5045
      raise errors.OpPrereqError("Cannot do ip check without a name check",
5046
                                 errors.ECODE_INVAL)
5047

    
5048
  def BuildHooksEnv(self):
5049
    """Build hooks env.
5050

5051
    This runs on master, primary and secondary nodes of the instance.
5052

5053
    """
5054
    env = _BuildInstanceHookEnvByObject(self, self.instance)
5055
    env["INSTANCE_NEW_NAME"] = self.op.new_name
5056
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5057
    return env, nl, nl
5058

    
5059
  def CheckPrereq(self):
5060
    """Check prerequisites.
5061

5062
    This checks that the instance is in the cluster and is not running.
5063

5064
    """
5065
    self.op.instance_name = _ExpandInstanceName(self.cfg,
5066
                                                self.op.instance_name)
5067
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5068
    assert instance is not None
5069
    _CheckNodeOnline(self, instance.primary_node)
5070
    _CheckInstanceDown(self, instance, "cannot rename")
5071
    self.instance = instance
5072

    
5073
    new_name = self.op.new_name
5074
    if self.op.name_check:
5075
      hostname = netutils.GetHostname(name=new_name)
5076
      new_name = self.op.new_name = hostname.name
5077
      if (self.op.ip_check and
5078
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
5079
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
5080
                                   (hostname.ip, new_name),
5081
                                   errors.ECODE_NOTUNIQUE)
5082

    
5083
    instance_list = self.cfg.GetInstanceList()
5084
    if new_name in instance_list:
5085
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
5086
                                 new_name, errors.ECODE_EXISTS)
5087

    
5088
  def Exec(self, feedback_fn):
5089
    """Reinstall the instance.
5090

5091
    """
5092
    inst = self.instance
5093
    old_name = inst.name
5094

    
5095
    if inst.disk_template == constants.DT_FILE:
5096
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
5097

    
5098
    self.cfg.RenameInstance(inst.name, self.op.new_name)
5099
    # Change the instance lock. This is definitely safe while we hold the BGL
5100
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
5101
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
5102

    
5103
    # re-read the instance from the configuration after rename
5104
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
5105

    
5106
    if inst.disk_template == constants.DT_FILE:
5107
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
5108
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
5109
                                                     old_file_storage_dir,
5110
                                                     new_file_storage_dir)
5111
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
5112
                   " (but the instance has been renamed in Ganeti)" %
5113
                   (inst.primary_node, old_file_storage_dir,
5114
                    new_file_storage_dir))
5115

    
5116
    _StartInstanceDisks(self, inst, None)
5117
    try:
5118
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
5119
                                                 old_name, self.op.debug_level)
5120
      msg = result.fail_msg
5121
      if msg:
5122
        msg = ("Could not run OS rename script for instance %s on node %s"
5123
               " (but the instance has been renamed in Ganeti): %s" %
5124
               (inst.name, inst.primary_node, msg))
5125
        self.proc.LogWarning(msg)
5126
    finally:
5127
      _ShutdownInstanceDisks(self, inst)
5128

    
5129
    return inst.name
5130

    
5131

    
5132
class LURemoveInstance(LogicalUnit):
5133
  """Remove an instance.
5134

5135
  """
5136
  HPATH = "instance-remove"
5137
  HTYPE = constants.HTYPE_INSTANCE
5138
  _OP_PARAMS = [
5139
    _PInstanceName,
5140
    ("ignore_failures", False, ht.TBool),
5141
    _PShutdownTimeout,
5142
    ]
5143
  REQ_BGL = False
5144

    
5145
  def ExpandNames(self):
5146
    self._ExpandAndLockInstance()
5147
    self.needed_locks[locking.LEVEL_NODE] = []
5148
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5149

    
5150
  def DeclareLocks(self, level):
5151
    if level == locking.LEVEL_NODE:
5152
      self._LockInstancesNodes()
5153

    
5154
  def BuildHooksEnv(self):
5155
    """Build hooks env.
5156

5157
    This runs on master, primary and secondary nodes of the instance.
5158

5159
    """
5160
    env = _BuildInstanceHookEnvByObject(self, self.instance)
5161
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
5162
    nl = [self.cfg.GetMasterNode()]
5163
    nl_post = list(self.instance.all_nodes) + nl
5164
    return env, nl, nl_post
5165

    
5166
  def CheckPrereq(self):
5167
    """Check prerequisites.
5168

5169
    This checks that the instance is in the cluster.
5170

5171
    """
5172
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5173
    assert self.instance is not None, \
5174
      "Cannot retrieve locked instance %s" % self.op.instance_name
5175

    
5176
  def Exec(self, feedback_fn):
5177
    """Remove the instance.
5178

5179
    """
5180
    instance = self.instance
5181
    logging.info("Shutting down instance %s on node %s",
5182
                 instance.name, instance.primary_node)
5183

    
5184
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
5185
                                             self.op.shutdown_timeout)
5186
    msg = result.fail_msg
5187
    if msg:
5188
      if self.op.ignore_failures:
5189
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
5190
      else:
5191
        raise errors.OpExecError("Could not shutdown instance %s on"
5192
                                 " node %s: %s" %
5193
                                 (instance.name, instance.primary_node, msg))
5194

    
5195
    _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
5196

    
5197

    
5198
def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
5199
  """Utility function to remove an instance.
5200

5201
  """
5202
  logging.info("Removing block devices for instance %s", instance.name)
5203

    
5204
  if not _RemoveDisks(lu, instance):
5205
    if not ignore_failures:
5206
      raise errors.OpExecError("Can't remove instance's disks")
5207
    feedback_fn("Warning: can't remove instance's disks")
5208

    
5209
  logging.info("Removing instance %s out of cluster config", instance.name)
5210

    
5211
  lu.cfg.RemoveInstance(instance.name)
5212

    
5213
  assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
5214
    "Instance lock removal conflict"
5215

    
5216
  # Remove lock for the instance
5217
  lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
5218

    
5219

    
5220
class LUQueryInstances(NoHooksLU):
5221
  """Logical unit for querying instances.
5222

5223
  """
5224
  # pylint: disable-msg=W0142
5225
  _OP_PARAMS = [
5226
    ("output_fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString)),
5227
    ("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
5228
    ("use_locking", False, ht.TBool),
5229
    ]
5230
  REQ_BGL = False
5231
  _SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
5232
                    "serial_no", "ctime", "mtime", "uuid"]
5233
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
5234
                                    "admin_state",
5235
                                    "disk_template", "ip", "mac", "bridge",
5236
                                    "nic_mode", "nic_link",
5237
                                    "sda_size", "sdb_size", "vcpus", "tags",
5238
                                    "network_port", "beparams",
5239
                                    r"(disk)\.(size)/([0-9]+)",
5240
                                    r"(disk)\.(sizes)", "disk_usage",
5241
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
5242
                                    r"(nic)\.(bridge)/([0-9]+)",
5243
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
5244
                                    r"(disk|nic)\.(count)",
5245
                                    "hvparams", "custom_hvparams",
5246
                                    "custom_beparams", "custom_nicparams",
5247
                                    ] + _SIMPLE_FIELDS +
5248
                                  ["hv/%s" % name
5249
                                   for name in constants.HVS_PARAMETERS
5250
                                   if name not in constants.HVC_GLOBALS] +
5251
                                  ["be/%s" % name
5252
                                   for name in constants.BES_PARAMETERS])
5253
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state",
5254
                                   "oper_ram",
5255
                                   "oper_vcpus",
5256
                                   "status")
5257

    
5258

    
5259
  def CheckArguments(self):
5260
    _CheckOutputFields(static=self._FIELDS_STATIC,
5261
                       dynamic=self._FIELDS_DYNAMIC,
5262
                       selected=self.op.output_fields)
5263

    
5264
  def ExpandNames(self):
5265
    self.needed_locks = {}
5266
    self.share_locks[locking.LEVEL_INSTANCE] = 1
5267
    self.share_locks[locking.LEVEL_NODE] = 1
5268

    
5269
    if self.op.names:
5270
      self.wanted = _GetWantedInstances(self, self.op.names)
5271
    else:
5272
      self.wanted = locking.ALL_SET
5273

    
5274
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
5275
    self.do_locking = self.do_node_query and self.op.use_locking
5276
    if self.do_locking:
5277
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
5278
      self.needed_locks[locking.LEVEL_NODE] = []
5279
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5280

    
5281
  def DeclareLocks(self, level):
5282
    if level == locking.LEVEL_NODE and self.do_locking:
5283
      self._LockInstancesNodes()
5284

    
5285
  def Exec(self, feedback_fn):
5286
    """Computes the list of nodes and their attributes.
5287

5288
    """
5289
    # pylint: disable-msg=R0912
5290
    # way too many branches here
5291
    all_info = self.cfg.GetAllInstancesInfo()
5292
    if self.wanted == locking.ALL_SET:
5293
      # caller didn't specify instance names, so ordering is not important
5294
      if self.do_locking:
5295
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
5296
      else:
5297
        instance_names = all_info.keys()
5298
      instance_names = utils.NiceSort(instance_names)
5299
    else:
5300
      # caller did specify names, so we must keep the ordering
5301
      if self.do_locking:
5302
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
5303
      else:
5304
        tgt_set = all_info.keys()
5305
      missing = set(self.wanted).difference(tgt_set)
5306
      if missing:
5307
        raise errors.OpExecError("Some instances were removed before"
5308
                                 " retrieving their data: %s" % missing)
5309
      instance_names = self.wanted
5310

    
5311
    instance_list = [all_info[iname] for iname in instance_names]
5312

    
5313
    # begin data gathering
5314

    
5315
    nodes = frozenset([inst.primary_node for inst in instance_list])
5316
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
5317

    
5318
    bad_nodes = []
5319
    off_nodes = []
5320
    if self.do_node_query:
5321
      live_data = {}
5322
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
5323
      for name in nodes:
5324
        result = node_data[name]
5325
        if result.offline:
5326
          # offline nodes will be in both lists
5327
          off_nodes.append(name)
5328
        if result.fail_msg:
5329
          bad_nodes.append(name)
5330
        else:
5331
          if result.payload:
5332
            live_data.update(result.payload)
5333
          # else no instance is alive
5334
    else:
5335
      live_data = dict([(name, {}) for name in instance_names])
5336

    
5337
    # end data gathering
5338

    
5339
    HVPREFIX = "hv/"
5340
    BEPREFIX = "be/"
5341
    output = []
5342
    cluster = self.cfg.GetClusterInfo()
5343
    for instance in instance_list:
5344
      iout = []
5345
      i_hv = cluster.FillHV(instance, skip_globals=True)
5346
      i_be = cluster.FillBE(instance)
5347
      i_nicp = [cluster.SimpleFillNIC(nic.nicparams) for nic in instance.nics]
5348
      for field in self.op.output_fields:
5349
        st_match = self._FIELDS_STATIC.Matches(field)
5350
        if field in self._SIMPLE_FIELDS:
5351
          val = getattr(instance, field)
5352
        elif field == "pnode":
5353
          val = instance.primary_node
5354
        elif field == "snodes":
5355
          val = list(instance.secondary_nodes)
5356
        elif field == "admin_state":
5357
          val = instance.admin_up
5358
        elif field == "oper_state":
5359
          if instance.primary_node in bad_nodes:
5360
            val = None
5361
          else:
5362
            val = bool(live_data.get(instance.name))
5363
        elif field == "status":
5364
          if instance.primary_node in off_nodes:
5365
            val = "ERROR_nodeoffline"
5366
          elif instance.primary_node in bad_nodes:
5367
            val = "ERROR_nodedown"
5368
          else:
5369
            running = bool(live_data.get(instance.name))
5370
            if running:
5371
              if instance.admin_up:
5372
                val = "running"
5373
              else:
5374
                val = "ERROR_up"
5375
            else:
5376
              if instance.admin_up:
5377
                val = "ERROR_down"
5378
              else:
5379
                val = "ADMIN_down"
5380
        elif field == "oper_ram":
5381
          if instance.primary_node in bad_nodes:
5382
            val = None
5383
          elif instance.name in live_data:
5384
            val = live_data[instance.name].get("memory", "?")
5385
          else:
5386
            val = "-"
5387
        elif field == "oper_vcpus":
5388
          if instance.primary_node in bad_nodes:
5389
            val = None
5390
          elif instance.name in live_data:
5391
            val = live_data[instance.name].get("vcpus", "?")
5392
          else:
5393
            val = "-"
5394
        elif field == "vcpus":
5395
          val = i_be[constants.BE_VCPUS]
5396
        elif field == "disk_template":
5397
          val = instance.disk_template
5398
        elif field == "ip":
5399
          if instance.nics:
5400
            val = instance.nics[0].ip
5401
          else:
5402
            val = None
5403
        elif field == "nic_mode":
5404
          if instance.nics:
5405
            val = i_nicp[0][constants.NIC_MODE]
5406
          else:
5407
            val = None
5408
        elif field == "nic_link":
5409
          if instance.nics:
5410
            val = i_nicp[0][constants.NIC_LINK]
5411
          else:
5412
            val = None
5413
        elif field == "bridge":
5414
          if (instance.nics and
5415
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
5416
            val = i_nicp[0][constants.NIC_LINK]
5417
          else:
5418
            val = None
5419
        elif field == "mac":
5420
          if instance.nics:
5421
            val = instance.nics[0].mac
5422
          else:
5423
            val = None
5424
        elif field == "custom_nicparams":
5425
          val = [nic.nicparams for nic in instance.nics]
5426
        elif field == "sda_size" or field == "sdb_size":
5427
          idx = ord(field[2]) - ord('a')
5428
          try:
5429
            val = instance.FindDisk(idx).size
5430
          except errors.OpPrereqError:
5431
            val = None
5432
        elif field == "disk_usage": # total disk usage per node
5433
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
5434
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
5435
        elif field == "tags":
5436
          val = list(instance.GetTags())
5437
        elif field == "custom_hvparams":
5438
          val = instance.hvparams # not filled!
5439
        elif field == "hvparams":
5440
          val = i_hv
5441
        elif (field.startswith(HVPREFIX) and
5442
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS and
5443
              field[len(HVPREFIX):] not in constants.HVC_GLOBALS):
5444
          val = i_hv.get(field[len(HVPREFIX):], None)
5445
        elif field == "custom_beparams":
5446
          val = instance.beparams
5447
        elif field == "beparams":
5448
          val = i_be
5449
        elif (field.startswith(BEPREFIX) and
5450
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
5451
          val = i_be.get(field[len(BEPREFIX):], None)
5452
        elif st_match and st_match.groups():
5453
          # matches a variable list
5454
          st_groups = st_match.groups()
5455
          if st_groups and st_groups[0] == "disk":
5456
            if st_groups[1] == "count":
5457
              val = len(instance.disks)
5458
            elif st_groups[1] == "sizes":
5459
              val = [disk.size for disk in instance.disks]
5460
            elif st_groups[1] == "size":
5461
              try:
5462
                val = instance.FindDisk(st_groups[2]).size
5463
              except errors.OpPrereqError:
5464
                val = None
5465
            else:
5466
              assert False, "Unhandled disk parameter"
5467
          elif st_groups[0] == "nic":
5468
            if st_groups[1] == "count":
5469
              val = len(instance.nics)
5470
            elif st_groups[1] == "macs":
5471
              val = [nic.mac for nic in instance.nics]
5472
            elif st_groups[1] == "ips":
5473
              val = [nic.ip for nic in instance.nics]
5474
            elif st_groups[1] == "modes":
5475
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
5476
            elif st_groups[1] == "links":
5477
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
5478
            elif st_groups[1] == "bridges":
5479
              val = []
5480
              for nicp in i_nicp:
5481
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
5482
                  val.append(nicp[constants.NIC_LINK])
5483
                else:
5484
                  val.append(None)
5485
            else:
5486
              # index-based item
5487
              nic_idx = int(st_groups[2])
5488
              if nic_idx >= len(instance.nics):
5489
                val = None
5490
              else:
5491
                if st_groups[1] == "mac":
5492
                  val = instance.nics[nic_idx].mac
5493
                elif st_groups[1] == "ip":
5494
                  val = instance.nics[nic_idx].ip
5495
                elif st_groups[1] == "mode":
5496
                  val = i_nicp[nic_idx][constants.NIC_MODE]
5497
                elif st_groups[1] == "link":
5498
                  val = i_nicp[nic_idx][constants.NIC_LINK]
5499
                elif st_groups[1] == "bridge":
5500
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
5501
                  if nic_mode == constants.NIC_MODE_BRIDGED:
5502
                    val = i_nicp[nic_idx][constants.NIC_LINK]
5503
                  else:
5504
                    val = None
5505
                else:
5506
                  assert False, "Unhandled NIC parameter"
5507
          else:
5508
            assert False, ("Declared but unhandled variable parameter '%s'" %
5509
                           field)
5510
        else:
5511
          assert False, "Declared but unhandled parameter '%s'" % field
5512
        iout.append(val)
5513
      output.append(iout)
5514

    
5515
    return output
5516

    
5517

    
5518
class LUFailoverInstance(LogicalUnit):
5519
  """Failover an instance.
5520

5521
  """
5522
  HPATH = "instance-failover"
5523
  HTYPE = constants.HTYPE_INSTANCE
5524
  _OP_PARAMS = [
5525
    _PInstanceName,
5526
    ("ignore_consistency", False, ht.TBool),
5527
    _PShutdownTimeout,
5528
    ]
5529
  REQ_BGL = False
5530

    
5531
  def ExpandNames(self):
5532
    self._ExpandAndLockInstance()
5533
    self.needed_locks[locking.LEVEL_NODE] = []
5534
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5535

    
5536
  def DeclareLocks(self, level):
5537
    if level == locking.LEVEL_NODE:
5538
      self._LockInstancesNodes()
5539

    
5540
  def BuildHooksEnv(self):
5541
    """Build hooks env.
5542

5543
    This runs on master, primary and secondary nodes of the instance.
5544

5545
    """
5546
    instance = self.instance
5547
    source_node = instance.primary_node
5548
    target_node = instance.secondary_nodes[0]
5549
    env = {
5550
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
5551
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5552
      "OLD_PRIMARY": source_node,
5553
      "OLD_SECONDARY": target_node,
5554
      "NEW_PRIMARY": target_node,
5555
      "NEW_SECONDARY": source_node,
5556
      }
5557
    env.update(_BuildInstanceHookEnvByObject(self, instance))
5558
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5559
    nl_post = list(nl)
5560
    nl_post.append(source_node)
5561
    return env, nl, nl_post
5562

    
5563
  def CheckPrereq(self):
5564
    """Check prerequisites.
5565

5566
    This checks that the instance is in the cluster.
5567

5568
    """
5569
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5570
    assert self.instance is not None, \
5571
      "Cannot retrieve locked instance %s" % self.op.instance_name
5572

    
5573
    bep = self.cfg.GetClusterInfo().FillBE(instance)
5574
    if instance.disk_template not in constants.DTS_NET_MIRROR:
5575
      raise errors.OpPrereqError("Instance's disk layout is not"
5576
                                 " network mirrored, cannot failover.",
5577
                                 errors.ECODE_STATE)
5578

    
5579
    secondary_nodes = instance.secondary_nodes
5580
    if not secondary_nodes:
5581
      raise errors.ProgrammerError("no secondary node but using "
5582
                                   "a mirrored disk template")
5583

    
5584
    target_node = secondary_nodes[0]
5585
    _CheckNodeOnline(self, target_node)
5586
    _CheckNodeNotDrained(self, target_node)
5587
    if instance.admin_up:
5588
      # check memory requirements on the secondary node
5589
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5590
                           instance.name, bep[constants.BE_MEMORY],
5591
                           instance.hypervisor)
5592
    else:
5593
      self.LogInfo("Not checking memory on the secondary node as"
5594
                   " instance will not be started")
5595

    
5596
    # check bridge existance
5597
    _CheckInstanceBridgesExist(self, instance, node=target_node)
5598

    
5599
  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance
    primary_node = self.cfg.GetNodeInfo(instance.primary_node)

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    if instance.admin_up:
      feedback_fn("* checking disk consistency between source and target")
      for dev in instance.disks:
        # for drbd, these are drbd over lvm
        if not _CheckDiskConsistency(self, dev, target_node, False):
          if not self.op.ignore_consistency:
            raise errors.OpExecError("Disk %s is degraded on target node,"
                                     " aborting failover." % dev.iv_name)
    else:
      feedback_fn("* not checking disk consistency as instance is not running")

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency or primary_node.offline:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance, feedback_fn)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUMigrateInstance(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_PARAMS = [
    _PInstanceName,
    _PMigrationMode,
    _PMigrationLive,
    ("cleanup", False, ht.TBool),
    ]

  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self._migrater = TLMigrateInstance(self, self.op.instance_name,
                                       self.op.cleanup)
    self.tasklets = [self._migrater]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self._migrater.instance
    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]
    env = _BuildInstanceHookEnvByObject(self, instance)
    env["MIGRATE_LIVE"] = self._migrater.live
    env["MIGRATE_CLEANUP"] = self.op.cleanup
    env.update({
        "OLD_PRIMARY": source_node,
        "OLD_SECONDARY": target_node,
        "NEW_PRIMARY": target_node,
        "NEW_SECONDARY": source_node,
        })
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    nl_post = list(nl)
    nl_post.append(source_node)
    return env, nl, nl_post


class LUMoveInstance(LogicalUnit):
5727
  """Move an instance by data-copying.
5728

5729
  """
5730
  HPATH = "instance-move"
5731
  HTYPE = constants.HTYPE_INSTANCE
5732
  _OP_PARAMS = [
5733
    _PInstanceName,
5734
    ("target_node", ht.NoDefault, ht.TNonEmptyString),
5735
    _PShutdownTimeout,
5736
    ]
5737
  REQ_BGL = False
5738

    
5739
  def ExpandNames(self):
5740
    self._ExpandAndLockInstance()
5741
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
5742
    self.op.target_node = target_node
5743
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
5744
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5745

    
5746
  def DeclareLocks(self, level):
5747
    if level == locking.LEVEL_NODE:
5748
      self._LockInstancesNodes(primary_only=True)
5749

    
5750
  def BuildHooksEnv(self):
5751
    """Build hooks env.
5752

5753
    This runs on master, primary and secondary nodes of the instance.
5754

5755
    """
5756
    env = {
5757
      "TARGET_NODE": self.op.target_node,
5758
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5759
      }
5760
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5761
    nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
5762
                                       self.op.target_node]
5763
    return env, nl, nl
5764

    
5765
  def CheckPrereq(self):
5766
    """Check prerequisites.
5767

5768
    This checks that the instance is in the cluster.
5769

5770
    """
5771
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5772
    assert self.instance is not None, \
5773
      "Cannot retrieve locked instance %s" % self.op.instance_name
5774

    
5775
    node = self.cfg.GetNodeInfo(self.op.target_node)
5776
    assert node is not None, \
5777
      "Cannot retrieve locked node %s" % self.op.target_node
5778

    
5779
    self.target_node = target_node = node.name
5780

    
5781
    if target_node == instance.primary_node:
5782
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
5783
                                 (instance.name, target_node),
5784
                                 errors.ECODE_STATE)
5785

    
5786
    bep = self.cfg.GetClusterInfo().FillBE(instance)
5787

    
5788
    for idx, dsk in enumerate(instance.disks):
5789
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
5790
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
5791
                                   " cannot copy" % idx, errors.ECODE_STATE)
5792

    
5793
    _CheckNodeOnline(self, target_node)
5794
    _CheckNodeNotDrained(self, target_node)
5795
    _CheckNodeVmCapable(self, target_node)
5796

    
5797
    if instance.admin_up:
5798
      # check memory requirements on the secondary node
5799
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5800
                           instance.name, bep[constants.BE_MEMORY],
5801
                           instance.hypervisor)
5802
    else:
5803
      self.LogInfo("Not checking memory on the secondary node as"
5804
                   " instance will not be started")
5805

    
5806
    # check bridge existance
5807
    _CheckInstanceBridgesExist(self, instance, node=target_node)
5808

    
5809
  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      _CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, disk,
                                               instance.name, True)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node, disk,
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    instance.primary_node = target_node
    self.cfg.Update(instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    _RemoveDisks(self, instance, target_node=source_node)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUMigrateNode(LogicalUnit):
  """Migrate all instances from a node.

  """
  HPATH = "node-migrate"
  HTYPE = constants.HTYPE_NODE
  _OP_PARAMS = [
    _PNodeName,
    _PMigrationMode,
    _PMigrationLive,
    ]
  REQ_BGL = False

  def ExpandNames(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

    # Create tasklets for migrating all instances on this node
    names = []
    tasklets = []

    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
      logging.debug("Migrating instance %s", inst.name)
      names.append(inst.name)

      tasklets.append(TLMigrateInstance(self, inst.name, False))

    self.tasklets = tasklets

    # Declare instance locks
    self.needed_locks[locking.LEVEL_INSTANCE] = names

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "NODE_NAME": self.op.node_name,
      }

    nl = [self.cfg.GetMasterNode()]

    return (env, nl, nl)


class TLMigrateInstance(Tasklet):
  """Tasklet class for instance migration.

  @type live: boolean
  @ivar live: whether the migration will be done live or non-live;
      this variable is initialized only after CheckPrereq has run

  """
  def __init__(self, lu, instance_name, cleanup):
    """Initializes this class.

    """
    Tasklet.__init__(self, lu)

    # Parameters
    self.instance_name = instance_name
    self.cleanup = cleanup
    self.live = False # will be overridden later

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
    instance = self.cfg.GetInstanceInfo(instance_name)
    assert instance is not None

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.", errors.ECODE_STATE)

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ConfigurationError("No secondary node but using"
                                      " drbd8 disk template")

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    _CheckInstanceBridgesExist(self.lu, instance, node=target_node)

    if not self.cleanup:
      _CheckNodeNotDrained(self.lu, target_node)
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      result.Raise("Can't migrate, please use failover",
                   prereq=True, ecode=errors.ECODE_STATE)

    self.instance = instance

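    # Resolve the obsolete 'live' parameter into 'mode': live=True maps to
    # live migration, live=False to non-live; if neither 'live' nor 'mode'
    # was given, the hypervisor's HV_MIGRATION_MODE default is used.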
    if self.lu.op.live is not None and self.lu.op.mode is not None:
      raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
                                 " parameters is accepted",
                                 errors.ECODE_INVAL)
    if self.lu.op.live is not None:
      if self.lu.op.live:
        self.lu.op.mode = constants.HT_MIGRATION_LIVE
      else:
        self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
      # reset the 'live' parameter to None so that repeated
      # invocations of CheckPrereq do not raise an exception
      self.lu.op.live = None
    elif self.lu.op.mode is None:
      # read the default value from the hypervisor
      i_hv = self.cfg.GetClusterInfo().FillHV(instance, skip_globals=False)
      self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]

    self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        nres.Raise("Cannot resync disks on node %s" % node)
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    result.Raise("Cannot change disk to secondary on node %s" % node)

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      nres.Raise("Cannot disconnect disks on node %s" % node)

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      nres.Raise("Cannot change disks config on node %s" % node)

  def _ExecCleanup(self):
    """Try to clean up after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise("Can't contact node %s" % node)

    runningon_source = instance.name in ins_l[source_node].payload
    runningon_target = instance.name in ins_l[target_node].payload

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance, self.feedback_fn)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore errors here, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.lu.LogWarning("Migration failed and I can't reconnect the"
                         " drives: error '%s'\n"
                         "Please look and recover the instance status" %
                         str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.fail_msg
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s",
                    target_node, abort_msg)
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.

    The migration is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self.lu, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migration." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.fail_msg
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.fail_msg
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Pre-migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.live)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance, self.feedback_fn)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s", msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def Exec(self, feedback_fn):
    """Perform the migration.

    """
    feedback_fn("Migrating instance %s" % self.instance.name)

    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node
    self.target_node = self.instance.secondary_nodes[0]
    self.all_nodes = [self.source_node, self.target_node]
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }
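    # The DRBD and migration RPCs use these secondary (replication network)
    # IP addresses rather than the nodes' primary addresses.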

    if self.cleanup:
      return self._ExecCleanup()
    else:
      return self._ExecMigration()


def _CreateBlockDev(lu, node, instance, device, force_create,
                    info, force_open):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device which has
      CreateOnSecondary() attribute
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  if device.CreateOnSecondary():
    force_create = True

  if device.children:
    for child in device.children:
      _CreateBlockDev(lu, node, instance, child, force_create,
                      info, force_open)

  if not force_create:
    return

  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)


def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created in advance.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  lu.cfg.SetDiskID(device, node)
  result = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, force_open, info)
  result.Raise("Can't create block device %s on"
               " node %s for instance %s" % (device, node, instance.name))
  if device.physical_id is None:
    device.physical_id = result.payload


def _GenerateUniqueNames(lu, exts):
  """Generate a suitable LV name.

  This will generate a logical volume name for the given instance.

  """
  results = []
  for val in exts:
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
    results.append("%s%s" % (new_id, val))
  return results


def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                          logical_id=(primary, secondary, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name)
  return drbd_dev


def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vgname, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    _RequireFileStorage()

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         disk_index)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks


def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name


def _CalcEta(time_taken, written, total_size):
  """Calculates the ETA based on size written and total size.

  @param time_taken: The time taken so far
  @param written: amount written so far
  @param total_size: The total size of data to be written
  @return: The remaining time in seconds

  """
  avg_time = time_taken / float(written)
  return (total_size - written) * avg_time


def _WipeDisks(lu, instance):
  """Wipes instance disks.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should wipe
  @return: the success of the wipe

  """
  node = instance.primary_node
  for idx, device in enumerate(instance.disks):
    lu.LogInfo("* Wiping disk %d", idx)
    logging.info("Wiping disk %d for instance %s", idx, instance.name)

    # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
    # MAX_WIPE_CHUNK at max
    wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
                          constants.MIN_WIPE_CHUNK_PERCENT)
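    # Illustrative example (assuming MIN_WIPE_CHUNK_PERCENT is 10 and
    # MAX_WIPE_CHUNK is 1024 MB): a 51200 MB disk would be wiped in chunks
    # of min(1024, 51200 / 100.0 * 10) = 1024 MB each.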

    offset = 0
    size = device.size
    last_output = 0
    start_time = time.time()

    while offset < size:
      wipe_size = min(wipe_chunk_size, size - offset)
      result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
      result.Raise("Could not wipe disk %d at offset %d for size %d" %
                   (idx, offset, wipe_size))
      now = time.time()
      offset += wipe_size
      if now - last_output >= 60:
        eta = _CalcEta(now - start_time, offset, size)
        lu.LogInfo(" - done: %.1f%% ETA: %s" %
                   (offset / float(size) * 100, utils.FormatSeconds(eta)))
        last_output = now


def _CreateDisks(lu, instance, to_skip=None, target_node=None):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @type to_skip: list
  @param to_skip: list of indices to skip
  @type target_node: string
  @param target_node: if passed, overrides the target node for creation
  @rtype: boolean
  @return: the success of the creation

  """
  info = _GetInstanceInfoText(instance)
  if target_node is None:
    pnode = instance.primary_node
    all_nodes = instance.all_nodes
  else:
    pnode = target_node
    all_nodes = [pnode]

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    result.Raise("Failed to create directory '%s' on"
                 " node %s" % (file_storage_dir, pnode))

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for idx, device in enumerate(instance.disks):
    if to_skip and idx in to_skip:
      continue
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    for node in all_nodes:
      f_create = node == pnode
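      # Only the primary (or explicitly given target) node forces creation
      # and opening of the device; on other nodes the device is created only
      # if its type requires it (see _CreateBlockDev/CreateOnSecondary).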
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)


def _RemoveDisks(lu, instance, target_node=None):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type target_node: string
  @param target_node: used to override the node on which to remove the disks
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  for device in instance.disks:
    if target_node:
      edata = [(target_node, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node, disk in edata:
      lu.cfg.SetDiskID(disk, node)
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
      if msg:
        lu.LogWarning("Could not remove block device %s on node %s,"
                      " continuing anyway: %s", device.iv_name, node, msg)
        all_result = False

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if target_node:
      tgt = target_node
    else:
      tgt = instance.primary_node
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, instance.primary_node, result.fail_msg)
      all_result = False

  return all_result


def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group

  """
  # Required free disk space as a function of disk and swap space
  req_size_dict = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: sum(d["size"] for d in disks),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
    constants.DT_FILE: None,
  }
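  # For example, two DRBD8 disks of 1024 MB each need
  # (1024 + 128) + (1024 + 128) = 2304 MB of free space in the volume group.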

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return req_size_dict[disk_template]


def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstracts the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
                                                  hvname,
                                                  hvparams)
  for node in nodenames:
    info = hvinfo[node]
    if info.offline:
      continue
    info.Raise("Hypervisor parameter validation failed on node %s" % node)


def _CheckOSParams(lu, required, nodenames, osname, osparams):
  """OS parameters validation.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type required: boolean
  @param required: whether the validation should fail if the OS is not
      found
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type osname: string
  @param osname: the name of the OS we should use
  @type osparams: dict
  @param osparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  result = lu.rpc.call_os_validate(required, nodenames, osname,
                                   [constants.OS_VALIDATE_PARAMETERS],
                                   osparams)
  for node, nres in result.items():
    # we don't check for offline cases since this should be run only
    # against the master node and/or an instance's nodes
    nres.Raise("OS Parameters validation failed on node %s" % node)
    if not nres.payload:
      lu.LogInfo("OS %s not found on node %s, validation skipped",
                 osname, node)


class LUCreateInstance(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_PARAMS = [
    _PInstanceName,
    ("mode", ht.NoDefault, ht.TElemOf(constants.INSTANCE_CREATE_MODES)),
    ("start", True, ht.TBool),
    ("wait_for_sync", True, ht.TBool),
    ("ip_check", True, ht.TBool),
    ("name_check", True, ht.TBool),
    ("disks", ht.NoDefault, ht.TListOf(ht.TDict)),
    ("nics", ht.NoDefault, ht.TListOf(ht.TDict)),
    ("hvparams", ht.EmptyDict, ht.TDict),
    ("beparams", ht.EmptyDict, ht.TDict),
    ("osparams", ht.EmptyDict, ht.TDict),
    ("no_install", None, ht.TMaybeBool),
    ("os_type", None, ht.TMaybeString),
    ("force_variant", False, ht.TBool),
    ("source_handshake", None, ht.TOr(ht.TList, ht.TNone)),
    ("source_x509_ca", None, ht.TMaybeString),
    ("source_instance_name", None, ht.TMaybeString),
    ("src_node", None, ht.TMaybeString),
    ("src_path", None, ht.TMaybeString),
    ("pnode", None, ht.TMaybeString),
    ("snode", None, ht.TMaybeString),
    ("iallocator", None, ht.TMaybeString),
    ("hypervisor", None, ht.TMaybeString),
    ("disk_template", ht.NoDefault, _CheckDiskTemplate),
    ("identify_defaults", False, ht.TBool),
    ("file_driver", None, ht.TOr(ht.TNone, ht.TElemOf(constants.FILE_DRIVER))),
    ("file_storage_dir", None, ht.TMaybeString),
    ]
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do ip check without a name check",
                                 errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)

    # check disks: parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if "adopt" in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute",
                                 errors.ECODE_INVAL)

    ### Node/iallocator related checks
    _CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_NET_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    self._cds = _GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
          netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

    """ExpandNames for CreateInstance.
6909

6910
    Figure out the right locks for instance creation.
6911

6912
    """
6913
    self.needed_locks = {}
6914

    
6915
    instance_name = self.op.instance_name
6916
    # this is just a preventive check, but someone might still add this
6917
    # instance in the meantime, and creation will fail at lock-add time
6918
    if instance_name in self.cfg.GetInstanceList():
6919
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
6920
                                 instance_name, errors.ECODE_EXISTS)
6921

    
6922
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
6923

    
6924
    if self.op.iallocator:
6925
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6926
    else:
6927
      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
6928
      nodelist = [self.op.pnode]
6929
      if self.op.snode is not None:
6930
        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
6931
        nodelist.append(self.op.snode)
6932
      self.needed_locks[locking.LEVEL_NODE] = nodelist
6933

    
6934
    # in case of import lock the source node too
6935
    if self.op.mode == constants.INSTANCE_IMPORT:
6936
      src_node = self.op.src_node
6937
      src_path = self.op.src_path
6938

    
6939
      if src_path is None:
6940
        self.op.src_path = src_path = self.op.instance_name
6941

    
6942
      if src_node is None:
6943
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6944
        self.op.src_node = None
6945
        if os.path.isabs(src_path):
6946
          raise errors.OpPrereqError("Importing an instance from an absolute"
6947
                                     " path requires a source node option.",
6948
                                     errors.ECODE_INVAL)
6949
      else:
6950
        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
6951
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
6952
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
6953
        if not os.path.isabs(src_path):
6954
          self.op.src_path = src_path = \
6955
            utils.PathJoin(constants.EXPORT_DIR, src_path)
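          # e.g. a relative source path "myexport" is expanded to
          # "<EXPORT_DIR>/myexport"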
  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    nics = [n.ToDict() for n in self.nics]
    ial = IAllocator(self.cfg, self.rpc,
                     mode=constants.IALLOCATOR_MODE_ALLOC,
                     name=self.op.instance_name,
                     disk_template=self.op.disk_template,
                     tags=[],
                     os=self.op.os_type,
                     vcpus=self.be_full[constants.BE_VCPUS],
                     mem_size=self.be_full[constants.BE_MEMORY],
                     disks=self.disks,
                     nics=nics,
                     hypervisor=self.op.hypervisor,
                     )

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)
    if len(ial.result) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.result),
                                  ial.required_nodes), errors.ECODE_FAULT)
    self.op.pnode = ial.result[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))
    if ial.required_nodes == 2:
      self.op.snode = ial.result[1]

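  # Example (added comment, values made up): a successful run of the
  # allocator above yields something like
  #   ial.success == True, ial.required_nodes == 2,
  #   ial.result  == ["node3.example.com", "node7.example.com"]
  # from which pnode (and, only when required_nodes is 2, snode) are taken.
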
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(_BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=_NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d["size"], d["mode"]) for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
    ))

    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl

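  # Illustrative (added comment): the resulting hooks environment carries
  # ADD_MODE plus, for imports, SRC_NODE/SRC_PATH/SRC_IMAGES, together with
  # the generic per-instance variables produced by _BuildInstanceHookEnv
  # (name, primary node, memory, vcpus, NICs, disks, ...); hook scripts on
  # the master, primary and secondary nodes see these as environment
  # variables.
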
  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    src_node = self.op.src_node
    src_path = self.op.src_path

    if src_node is None:
      locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if src_path in exp_list[node].payload:
          found = True
          self.op.src_node = src_node = node
          self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
                                                       src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   src_path, errors.ECODE_INVAL)

    _CheckNodeOnline(self, src_node)
    result = self.rpc.call_export_info(src_node, src_path)
    result.Raise("No export or invalid export found in dir %s" % src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if (int(ei_version) != constants.EXPORT_VERSION):
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

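  # For orientation (added comment): the export config parsed above is a
  # plain INI-style file; the options this LU reads look roughly like
  #   [<INISECT_EXP>]  version
  #   [<INISECT_INS>]  name, disk_count, disk0_size, disk0_dump,
  #                    nic_count, nic0_mac, os, disk_template, hypervisor
  # (section names are the INISECT_* constants; keys as used below).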
  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      if einfo.has_option(constants.INISECT_INS, "disk_count"):
        disks = []
        # TODO: import the disk iv_name too
        for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({"size": disk_sz})
        self.op.disks = disks
      else:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if (not self.op.nics and
        einfo.has_option(constants.INISECT_INS, "nic_count")):
      nics = []
      for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
        ndict = {}
        for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
          v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
          ndict[name] = v
        nics.append(ndict)
      self.op.nics = nics

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

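  # Added note: _ReadExportParams only fills in opcode fields the user left
  # unset; explicit opcode values always win over what the export recorded,
  # and whatever is still missing is completed later from the cluster
  # defaults via the SimpleFill* helpers in CheckPrereq.
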
  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

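  # Added note: with identify_defaults set, parameters that merely repeat
  # the current cluster defaults are dropped again by _RevertToDefaults, so
  # the new instance keeps tracking future default changes. Illustrative
  # example: if the cluster default memory is 128 and the export also says
  # memory=128, the instance is stored without an explicit memory setting.
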
  def CheckPrereq(self):
7173
    """Check prerequisites.
7174

7175
    """
7176
    if self.op.mode == constants.INSTANCE_IMPORT:
7177
      export_info = self._ReadExportInfo()
7178
      self._ReadExportParams(export_info)
7179

    
7180
    _CheckDiskTemplate(self.op.disk_template)
7181

    
7182
    if (not self.cfg.GetVGName() and
7183
        self.op.disk_template not in constants.DTS_NOT_LVM):
7184
      raise errors.OpPrereqError("Cluster does not support lvm-based"
7185
                                 " instances", errors.ECODE_STATE)
7186

    
7187
    if self.op.hypervisor is None:
7188
      self.op.hypervisor = self.cfg.GetHypervisorType()
7189

    
7190
    cluster = self.cfg.GetClusterInfo()
7191
    enabled_hvs = cluster.enabled_hypervisors
7192
    if self.op.hypervisor not in enabled_hvs:
7193
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
7194
                                 " cluster (%s)" % (self.op.hypervisor,
7195
                                  ",".join(enabled_hvs)),
7196
                                 errors.ECODE_STATE)
7197

    
7198
    # check hypervisor parameter syntax (locally)
7199
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
7200
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
7201
                                      self.op.hvparams)
7202
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
7203
    hv_type.CheckParameterSyntax(filled_hvp)
7204
    self.hv_full = filled_hvp
7205
    # check that we don't specify global parameters on an instance
7206
    _CheckGlobalHvParams(self.op.hvparams)
7207

    
7208
    # fill and remember the beparams dict
7209
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
7210
    self.be_full = cluster.SimpleFillBE(self.op.beparams)
7211

    
7212
    # build os parameters
7213
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
7214

    
7215
    # now that hvp/bep are in final format, let's reset to defaults,
7216
    # if told to do so
7217
    if self.op.identify_defaults:
7218
      self._RevertToDefaults(cluster)
7219

    
7220
    # NIC buildup
7221
    self.nics = []
7222
    for idx, nic in enumerate(self.op.nics):
7223
      nic_mode_req = nic.get("mode", None)
7224
      nic_mode = nic_mode_req
7225
      if nic_mode is None:
7226
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
7227

    
7228
      # in routed mode, for the first nic, the default ip is 'auto'
7229
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
7230
        default_ip_mode = constants.VALUE_AUTO
7231
      else:
7232
        default_ip_mode = constants.VALUE_NONE
7233

    
7234
      # ip validity checks
7235
      ip = nic.get("ip", default_ip_mode)
7236
      if ip is None or ip.lower() == constants.VALUE_NONE:
7237
        nic_ip = None
7238
      elif ip.lower() == constants.VALUE_AUTO:
7239
        if not self.op.name_check:
7240
          raise errors.OpPrereqError("IP address set to auto but name checks"
7241
                                     " have been skipped",
7242
                                     errors.ECODE_INVAL)
7243
        nic_ip = self.hostname1.ip
7244
      else:
7245
        if not netutils.IPAddress.IsValid(ip):
7246
          raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
7247
                                     errors.ECODE_INVAL)
7248
        nic_ip = ip
7249

    
7250
      # TODO: check the ip address for uniqueness
7251
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
7252
        raise errors.OpPrereqError("Routed nic mode requires an ip address",
7253
                                   errors.ECODE_INVAL)
7254

    
7255
      # MAC address verification
7256
      mac = nic.get("mac", constants.VALUE_AUTO)
7257
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7258
        mac = utils.NormalizeAndValidateMac(mac)
7259

    
7260
        try:
7261
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
7262
        except errors.ReservationError:
7263
          raise errors.OpPrereqError("MAC address %s already in use"
7264
                                     " in cluster" % mac,
7265
                                     errors.ECODE_NOTUNIQUE)
7266

    
7267
      # bridge verification
7268
      bridge = nic.get("bridge", None)
7269
      link = nic.get("link", None)
7270
      if bridge and link:
7271
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
7272
                                   " at the same time", errors.ECODE_INVAL)
7273
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
7274
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
7275
                                   errors.ECODE_INVAL)
7276
      elif bridge:
7277
        link = bridge
7278

    
7279
      nicparams = {}
7280
      if nic_mode_req:
7281
        nicparams[constants.NIC_MODE] = nic_mode_req
7282
      if link:
7283
        nicparams[constants.NIC_LINK] = link
7284

    
7285
      check_params = cluster.SimpleFillNIC(nicparams)
7286
      objects.NIC.CheckParameterSyntax(check_params)
7287
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
7288

    
7289
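    # Illustrative (added comment): each entry of self.op.disks handled
    # below is a dict such as {"size": 10240, "mode": constants.DISK_RDWR},
    # optionally with an "adopt" key naming an existing LV to take over.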
    # disk checks/pre-build
7290
    self.disks = []
7291
    for disk in self.op.disks:
7292
      mode = disk.get("mode", constants.DISK_RDWR)
7293
      if mode not in constants.DISK_ACCESS_SET:
7294
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
7295
                                   mode, errors.ECODE_INVAL)
7296
      size = disk.get("size", None)
7297
      if size is None:
7298
        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
7299
      try:
7300
        size = int(size)
7301
      except (TypeError, ValueError):
7302
        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
7303
                                   errors.ECODE_INVAL)
7304
      new_disk = {"size": size, "mode": mode}
7305
      if "adopt" in disk:
7306
        new_disk["adopt"] = disk["adopt"]
7307
      self.disks.append(new_disk)
7308

    
7309
    if self.op.mode == constants.INSTANCE_IMPORT:
7310

    
7311
      # Check that the new instance doesn't have less disks than the export
7312
      instance_disks = len(self.disks)
7313
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
7314
      if instance_disks < export_disks:
7315
        raise errors.OpPrereqError("Not enough disks to import."
7316
                                   " (instance: %d, export: %d)" %
7317
                                   (instance_disks, export_disks),
7318
                                   errors.ECODE_INVAL)
7319

    
7320
      disk_images = []
7321
      for idx in range(export_disks):
7322
        option = 'disk%d_dump' % idx
7323
        if export_info.has_option(constants.INISECT_INS, option):
7324
          # FIXME: are the old os-es, disk sizes, etc. useful?
7325
          export_name = export_info.get(constants.INISECT_INS, option)
7326
          image = utils.PathJoin(self.op.src_path, export_name)
7327
          disk_images.append(image)
7328
        else:
7329
          disk_images.append(False)
7330

    
7331
      self.src_images = disk_images
7332

    
7333
      old_name = export_info.get(constants.INISECT_INS, 'name')
7334
      try:
7335
        exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
7336
      except (TypeError, ValueError), err:
7337
        raise errors.OpPrereqError("Invalid export file, nic_count is not"
7338
                                   " an integer: %s" % str(err),
7339
                                   errors.ECODE_STATE)
7340
      if self.op.instance_name == old_name:
7341
        for idx, nic in enumerate(self.nics):
7342
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
7343
            nic_mac_ini = 'nic%d_mac' % idx
7344
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
7345

    
7346
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
7347

    
7348
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
7349
    if self.op.ip_check:
7350
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
7351
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
7352
                                   (self.check_ip, self.op.instance_name),
7353
                                   errors.ECODE_NOTUNIQUE)
7354

    
7355
    #### mac address generation
7356
    # By generating here the mac address both the allocator and the hooks get
7357
    # the real final mac address rather than the 'auto' or 'generate' value.
7358
    # There is a race condition between the generation and the instance object
7359
    # creation, which means that we know the mac is valid now, but we're not
7360
    # sure it will be when we actually add the instance. If things go bad
7361
    # adding the instance will abort because of a duplicate mac, and the
7362
    # creation job will fail.
7363
    for nic in self.nics:
7364
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7365
        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
7366

    
7367
    #### allocator run
7368

    
7369
    if self.op.iallocator is not None:
7370
      self._RunAllocator()
7371

    
7372
    #### node related checks
7373

    
7374
    # check primary node
7375
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
7376
    assert self.pnode is not None, \
7377
      "Cannot retrieve locked node %s" % self.op.pnode
7378
    if pnode.offline:
7379
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
7380
                                 pnode.name, errors.ECODE_STATE)
7381
    if pnode.drained:
7382
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
7383
                                 pnode.name, errors.ECODE_STATE)
7384
    if not pnode.vm_capable:
7385
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
7386
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
7387

    
7388
    self.secondaries = []
7389

    
7390
    # mirror node verification
7391
    if self.op.disk_template in constants.DTS_NET_MIRROR:
7392
      if self.op.snode == pnode.name:
7393
        raise errors.OpPrereqError("The secondary node cannot be the"
7394
                                   " primary node.", errors.ECODE_INVAL)
7395
      _CheckNodeOnline(self, self.op.snode)
7396
      _CheckNodeNotDrained(self, self.op.snode)
7397
      _CheckNodeVmCapable(self, self.op.snode)
7398
      self.secondaries.append(self.op.snode)
7399

    
7400
    nodenames = [pnode.name] + self.secondaries
7401

    
7402
    req_size = _ComputeDiskSize(self.op.disk_template,
7403
                                self.disks)
7404

    
7405
    # Check lv size requirements, if not adopting
7406
    if req_size is not None and not self.adopt_disks:
7407
      _CheckNodesFreeDisk(self, nodenames, req_size)
7408

    
7409
    if self.adopt_disks: # instead, we must check the adoption data
7410
      all_lvs = set([i["adopt"] for i in self.disks])
7411
      if len(all_lvs) != len(self.disks):
7412
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
7413
                                   errors.ECODE_INVAL)
7414
      for lv_name in all_lvs:
7415
        try:
7416
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
7417
        except errors.ReservationError:
7418
          raise errors.OpPrereqError("LV named %s used by another instance" %
7419
                                     lv_name, errors.ECODE_NOTUNIQUE)
7420

    
7421
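      # Added note: as used below, the call_lv_list payload maps each LV
      # name to a tuple whose first element is its size (in MiB) and whose
      # third element is the "online/in use" flag; adoption refuses LVs
      # that are currently online.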
      node_lvs = self.rpc.call_lv_list([pnode.name],
7422
                                       self.cfg.GetVGName())[pnode.name]
7423
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
7424
      node_lvs = node_lvs.payload
7425
      delta = all_lvs.difference(node_lvs.keys())
7426
      if delta:
7427
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
7428
                                   utils.CommaJoin(delta),
7429
                                   errors.ECODE_INVAL)
7430
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
7431
      if online_lvs:
7432
        raise errors.OpPrereqError("Online logical volumes found, cannot"
7433
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
7434
                                   errors.ECODE_STATE)
7435
      # update the size of disk based on what is found
7436
      for dsk in self.disks:
7437
        dsk["size"] = int(float(node_lvs[dsk["adopt"]][0]))
7438

    
7439
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
7440

    
7441
    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
7442
    # check OS parameters (remotely)
7443
    _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
7444

    
7445
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
7446

    
7447
    # memory check on primary node
7448
    if self.op.start:
7449
      _CheckNodeFreeMemory(self, self.pnode.name,
7450
                           "creating instance %s" % self.op.instance_name,
7451
                           self.be_full[constants.BE_MEMORY],
7452
                           self.op.hypervisor)
7453

    
7454
    self.dry_run_result = list(nodenames)
7455

    
7456
  def Exec(self, feedback_fn):
7457
    """Create and add the instance to the cluster.
7458

7459
    """
7460
    instance = self.op.instance_name
7461
    pnode_name = self.pnode.name
7462

    
7463
    ht_kind = self.op.hypervisor
7464
    if ht_kind in constants.HTS_REQ_PORT:
7465
      network_port = self.cfg.AllocatePort()
7466
    else:
7467
      network_port = None
7468

    
7469
    if constants.ENABLE_FILE_STORAGE:
7470
      # this is needed because os.path.join does not accept None arguments
7471
      if self.op.file_storage_dir is None:
7472
        string_file_storage_dir = ""
7473
      else:
7474
        string_file_storage_dir = self.op.file_storage_dir
7475

    
7476
      # build the full file storage dir path
7477
      file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
7478
                                        string_file_storage_dir, instance)
7479
    else:
7480
      file_storage_dir = ""
7481

    
7482
    disks = _GenerateDiskTemplate(self,
7483
                                  self.op.disk_template,
7484
                                  instance, pnode_name,
7485
                                  self.secondaries,
7486
                                  self.disks,
7487
                                  file_storage_dir,
7488
                                  self.op.file_driver,
7489
                                  0)
7490

    
7491
    iobj = objects.Instance(name=instance, os=self.op.os_type,
7492
                            primary_node=pnode_name,
7493
                            nics=self.nics, disks=disks,
7494
                            disk_template=self.op.disk_template,
7495
                            admin_up=False,
7496
                            network_port=network_port,
7497
                            beparams=self.op.beparams,
7498
                            hvparams=self.op.hvparams,
7499
                            hypervisor=self.op.hypervisor,
7500
                            osparams=self.op.osparams,
7501
                            )
7502

    
7503
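    # Added note: for adoption the existing volumes (e.g. "xenvg/myvol",
    # illustrative name) are renamed on the primary node to the freshly
    # generated instance LV names, so from here on the configuration and
    # the node agree on the new names.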
    if self.adopt_disks:
      # rename LVs to the newly-generated names; we need to construct
      # 'fake' LV disks with the old data, plus the new unique_id
      tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
      rename_to = []
      for t_dsk, a_dsk in zip(tmp_disks, self.disks):
        rename_to.append(t_dsk.logical_id)
        t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
        self.cfg.SetDiskID(t_dsk, pnode_name)
      result = self.rpc.call_blockdev_rename(pnode_name,
                                             zip(tmp_disks, rename_to))
      result.Raise("Failed to rename adopted LVs")
    else:
7516
      feedback_fn("* creating instance disks...")
7517
      try:
7518
        _CreateDisks(self, iobj)
7519
      except errors.OpExecError:
7520
        self.LogWarning("Device creation failed, reverting...")
7521
        try:
7522
          _RemoveDisks(self, iobj)
7523
        finally:
7524
          self.cfg.ReleaseDRBDMinors(instance)
7525
          raise
7526

    
7527
      if self.cfg.GetClusterInfo().prealloc_wipe_disks:
7528
        feedback_fn("* wiping instance disks...")
7529
        try:
7530
          _WipeDisks(self, iobj)
7531
        except errors.OpExecError:
7532
          self.LogWarning("Device wiping failed, reverting...")
7533
          try:
7534
            _RemoveDisks(self, iobj)
7535
          finally:
7536
            self.cfg.ReleaseDRBDMinors(instance)
7537
            raise
7538

    
7539
    feedback_fn("adding instance %s to cluster config" % instance)
7540

    
7541
    self.cfg.AddInstance(iobj, self.proc.GetECId())
7542

    
7543
    # Declare that we don't want to remove the instance lock anymore, as we've
7544
    # added the instance to the config
7545
    del self.remove_locks[locking.LEVEL_INSTANCE]
7546
    # Unlock all the nodes
7547
    if self.op.mode == constants.INSTANCE_IMPORT:
7548
      nodes_keep = [self.op.src_node]
7549
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
7550
                       if node != self.op.src_node]
7551
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
7552
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
7553
    else:
7554
      self.context.glm.release(locking.LEVEL_NODE)
7555
      del self.acquired_locks[locking.LEVEL_NODE]
7556

    
7557
    if self.op.wait_for_sync:
7558
      disk_abort = not _WaitForSync(self, iobj)
7559
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
7560
      # make sure the disks are not degraded (still sync-ing is ok)
7561
      time.sleep(15)
7562
      feedback_fn("* checking mirrors status")
7563
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
7564
    else:
7565
      disk_abort = False
7566

    
7567
    if disk_abort:
7568
      _RemoveDisks(self, iobj)
7569
      self.cfg.RemoveInstance(iobj.name)
7570
      # Make sure the instance lock gets removed
7571
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
7572
      raise errors.OpExecError("There are some degraded disks for"
7573
                               " this instance")
7574

    
7575
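    # Added note: three OS-initialization paths follow: plain creation runs
    # the OS create scripts, a local import streams each saved disk dump
    # into the new disks, and a remote import pulls the disks from the
    # source cluster and then runs the rename script with the old name.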
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
7576
      if self.op.mode == constants.INSTANCE_CREATE:
7577
        if not self.op.no_install:
7578
          feedback_fn("* running the instance OS create scripts...")
7579
          # FIXME: pass debug option from opcode to backend
7580
          result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
7581
                                                 self.op.debug_level)
7582
          result.Raise("Could not add os for instance %s"
7583
                       " on node %s" % (instance, pnode_name))
7584

    
7585
      elif self.op.mode == constants.INSTANCE_IMPORT:
7586
        feedback_fn("* running the instance OS import scripts...")
7587

    
7588
        transfers = []
7589

    
7590
        for idx, image in enumerate(self.src_images):
7591
          if not image:
7592
            continue
7593

    
7594
          # FIXME: pass debug option from opcode to backend
7595
          dt = masterd.instance.DiskTransfer("disk/%s" % idx,
7596
                                             constants.IEIO_FILE, (image, ),
7597
                                             constants.IEIO_SCRIPT,
7598
                                             (iobj.disks[idx], idx),
7599
                                             None)
7600
          transfers.append(dt)
7601

    
7602
        import_result = \
7603
          masterd.instance.TransferInstanceData(self, feedback_fn,
7604
                                                self.op.src_node, pnode_name,
7605
                                                self.pnode.secondary_ip,
7606
                                                iobj, transfers)
7607
        if not compat.all(import_result):
7608
          self.LogWarning("Some disks for instance %s on node %s were not"
7609
                          " imported successfully" % (instance, pnode_name))
7610

    
7611
      elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
7612
        feedback_fn("* preparing remote import...")
7613
        connect_timeout = constants.RIE_CONNECT_TIMEOUT
7614
        timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
7615

    
7616
        disk_results = masterd.instance.RemoteImport(self, feedback_fn, iobj,
7617
                                                     self.source_x509_ca,
7618
                                                     self._cds, timeouts)
7619
        if not compat.all(disk_results):
7620
          # TODO: Should the instance still be started, even if some disks
7621
          # failed to import (valid for local imports, too)?
7622
          self.LogWarning("Some disks for instance %s on node %s were not"
7623
                          " imported successfully" % (instance, pnode_name))
7624

    
7625
        # Run rename script on newly imported instance
7626
        assert iobj.name == instance
7627
        feedback_fn("Running rename script for %s" % instance)
7628
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
7629
                                                   self.source_instance_name,
7630
                                                   self.op.debug_level)
7631
        if result.fail_msg:
7632
          self.LogWarning("Failed to run rename script for %s on node"
7633
                          " %s: %s" % (instance, pnode_name, result.fail_msg))
7634

    
7635
      else:
7636
        # also checked in the prereq part
7637
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
7638
                                     % self.op.mode)
7639

    
7640
    if self.op.start:
7641
      iobj.admin_up = True
7642
      self.cfg.Update(iobj, feedback_fn)
7643
      logging.info("Starting instance %s on node %s", instance, pnode_name)
7644
      feedback_fn("* starting instance...")
7645
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
7646
      result.Raise("Could not start instance")
7647

    
7648
    return list(iobj.all_nodes)
7649

    
7650

    
7651
class LUConnectConsole(NoHooksLU):
7652
  """Connect to an instance's console.
7653

7654
  This is somewhat special in that it returns the command line that
7655
  you need to run on the master node in order to connect to the
7656
  console.
7657

7658
  """
7659
  _OP_PARAMS = [
7660
    _PInstanceName
7661
    ]
7662
  REQ_BGL = False
7663

    
7664
  def ExpandNames(self):
7665
    self._ExpandAndLockInstance()
7666

    
7667
  def CheckPrereq(self):
7668
    """Check prerequisites.
7669

7670
    This checks that the instance is in the cluster.
7671

7672
    """
7673
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7674
    assert self.instance is not None, \
7675
      "Cannot retrieve locked instance %s" % self.op.instance_name
7676
    _CheckNodeOnline(self, self.instance.primary_node)
7677

    
7678
  def Exec(self, feedback_fn):
7679
    """Connect to the console of an instance
7680

7681
    """
7682
    instance = self.instance
7683
    node = instance.primary_node
7684

    
7685
    node_insts = self.rpc.call_instance_list([node],
7686
                                             [instance.hypervisor])[node]
7687
    node_insts.Raise("Can't get node information from %s" % node)
7688

    
7689
    if instance.name not in node_insts.payload:
7690
      if instance.admin_up:
7691
        state = "ERROR_down"
7692
      else:
7693
        state = "ADMIN_down"
7694
      raise errors.OpExecError("Instance %s is not running (state %s)" %
7695
                               (instance.name, state))
7696

    
7697
    logging.debug("Connecting to console of %s on %s", instance.name, node)
7698

    
7699
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
7700
    cluster = self.cfg.GetClusterInfo()
7701
    # beparams and hvparams are passed separately, to avoid editing the
7702
    # instance and then saving the defaults in the instance itself.
7703
    hvparams = cluster.FillHV(instance)
7704
    beparams = cluster.FillBE(instance)
7705
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
7706

    
7707
    # build ssh cmdline
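    # (added, illustrative) the return value is the command to run on the
    # master node, i.e. an ssh invocation to root on the primary node
    # wrapping the hypervisor's console command (for Xen typically
    # "xm console <instance>"); the exact form comes from the hypervisor's
    # GetShellCommandForConsole above.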
7708
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
7709

    
7710

    
7711
class LUReplaceDisks(LogicalUnit):
7712
  """Replace the disks of an instance.
7713

7714
  """
7715
  HPATH = "mirrors-replace"
7716
  HTYPE = constants.HTYPE_INSTANCE
7717
  _OP_PARAMS = [
7718
    _PInstanceName,
7719
    ("mode", ht.NoDefault, ht.TElemOf(constants.REPLACE_MODES)),
7720
    ("disks", ht.EmptyList, ht.TListOf(ht.TPositiveInt)),
7721
    ("remote_node", None, ht.TMaybeString),
7722
    ("iallocator", None, ht.TMaybeString),
7723
    ("early_release", False, ht.TBool),
7724
    ]
7725
  REQ_BGL = False
7726

    
7727
  def CheckArguments(self):
7728
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
7729
                                  self.op.iallocator)
7730

    
7731
  def ExpandNames(self):
7732
    self._ExpandAndLockInstance()
7733

    
7734
    if self.op.iallocator is not None:
7735
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7736

    
7737
    elif self.op.remote_node is not None:
7738
      remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
7739
      self.op.remote_node = remote_node
7740

    
7741
      # Warning: do not remove the locking of the new secondary here
7742
      # unless DRBD8.AddChildren is changed to work in parallel;
7743
      # currently it doesn't since parallel invocations of
7744
      # FindUnusedMinor will conflict
7745
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
7746
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
7747

    
7748
    else:
7749
      self.needed_locks[locking.LEVEL_NODE] = []
7750
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7751

    
7752
    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
7753
                                   self.op.iallocator, self.op.remote_node,
7754
                                   self.op.disks, False, self.op.early_release)
7755

    
7756
    self.tasklets = [self.replacer]
7757

    
7758
  def DeclareLocks(self, level):
7759
    # If we're not already locking all nodes in the set we have to declare the
7760
    # instance's primary/secondary nodes.
7761
    if (level == locking.LEVEL_NODE and
7762
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
7763
      self._LockInstancesNodes()
7764

    
7765
  def BuildHooksEnv(self):
7766
    """Build hooks env.
7767

7768
    This runs on the master, the primary and all the secondaries.
7769

7770
    """
7771
    instance = self.replacer.instance
7772
    env = {
7773
      "MODE": self.op.mode,
7774
      "NEW_SECONDARY": self.op.remote_node,
7775
      "OLD_SECONDARY": instance.secondary_nodes[0],
7776
      }
7777
    env.update(_BuildInstanceHookEnvByObject(self, instance))
7778
    nl = [
7779
      self.cfg.GetMasterNode(),
7780
      instance.primary_node,
7781
      ]
7782
    if self.op.remote_node is not None:
7783
      nl.append(self.op.remote_node)
7784
    return env, nl, nl
7785

    
7786

    
7787
class TLReplaceDisks(Tasklet):
7788
  """Replaces disks for an instance.
7789

7790
  Note: Locking is not within the scope of this class.
7791

7792
  """
7793
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
7794
               disks, delay_iallocator, early_release):
7795
    """Initializes this class.
7796

7797
    """
7798
    Tasklet.__init__(self, lu)
7799

    
7800
    # Parameters
7801
    self.instance_name = instance_name
7802
    self.mode = mode
7803
    self.iallocator_name = iallocator_name
7804
    self.remote_node = remote_node
7805
    self.disks = disks
7806
    self.delay_iallocator = delay_iallocator
7807
    self.early_release = early_release
7808

    
7809
    # Runtime data
7810
    self.instance = None
7811
    self.new_node = None
7812
    self.target_node = None
7813
    self.other_node = None
7814
    self.remote_node_info = None
7815
    self.node_secondary_ip = None
7816

    
7817
  @staticmethod
7818
  def CheckArguments(mode, remote_node, iallocator):
7819
    """Helper function for users of this class.
7820

7821
    """
7822
    # check for valid parameter combination
7823
    if mode == constants.REPLACE_DISK_CHG:
7824
      if remote_node is None and iallocator is None:
7825
        raise errors.OpPrereqError("When changing the secondary either an"
7826
                                   " iallocator script must be used or the"
7827
                                   " new node given", errors.ECODE_INVAL)
7828

    
7829
      if remote_node is not None and iallocator is not None:
7830
        raise errors.OpPrereqError("Give either the iallocator or the new"
7831
                                   " secondary, not both", errors.ECODE_INVAL)
7832

    
7833
    elif remote_node is not None or iallocator is not None:
7834
      # Not replacing the secondary
7835
      raise errors.OpPrereqError("The iallocator and new node options can"
7836
                                 " only be used when changing the"
7837
                                 " secondary node", errors.ECODE_INVAL)
7838

    
7839
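  # Quick reference (added comment) for the checks above:
  #   mode in (REPLACE_DISK_PRI, REPLACE_DISK_SEC, REPLACE_DISK_AUTO)
  #     -> neither remote_node nor iallocator may be given
  #   mode == REPLACE_DISK_CHG
  #     -> exactly one of remote_node or iallocator is required
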
  @staticmethod
7840
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
7841
    """Compute a new secondary node using an IAllocator.
7842

7843
    """
7844
    ial = IAllocator(lu.cfg, lu.rpc,
7845
                     mode=constants.IALLOCATOR_MODE_RELOC,
7846
                     name=instance_name,
7847
                     relocate_from=relocate_from)
7848

    
7849
    ial.Run(iallocator_name)
7850

    
7851
    if not ial.success:
7852
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
7853
                                 " %s" % (iallocator_name, ial.info),
7854
                                 errors.ECODE_NORES)
7855

    
7856
    if len(ial.result) != ial.required_nodes:
7857
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
7858
                                 " of nodes (%s), required %s" %
7859
                                 (iallocator_name,
7860
                                  len(ial.result), ial.required_nodes),
7861
                                 errors.ECODE_FAULT)
7862

    
7863
    remote_node_name = ial.result[0]
7864

    
7865
    lu.LogInfo("Selected new secondary for instance '%s': %s",
7866
               instance_name, remote_node_name)
7867

    
7868
    return remote_node_name
7869

    
7870
  def _FindFaultyDisks(self, node_name):
7871
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
7872
                                    node_name, True)
7873

    
7874
  def CheckPrereq(self):
7875
    """Check prerequisites.
7876

7877
    This checks that the instance is in the cluster.
7878

7879
    """
7880
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
7881
    assert instance is not None, \
7882
      "Cannot retrieve locked instance %s" % self.instance_name
7883

    
7884
    if instance.disk_template != constants.DT_DRBD8:
7885
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
7886
                                 " instances", errors.ECODE_INVAL)
7887

    
7888
    if len(instance.secondary_nodes) != 1:
7889
      raise errors.OpPrereqError("The instance has a strange layout,"
7890
                                 " expected one secondary but found %d" %
7891
                                 len(instance.secondary_nodes),
7892
                                 errors.ECODE_FAULT)
7893

    
7894
    if not self.delay_iallocator:
7895
      self._CheckPrereq2()
7896

    
7897
  def _CheckPrereq2(self):
7898
    """Check prerequisites, second part.
7899

7900
    This function should always be part of CheckPrereq. It was separated and is
7901
    now called from Exec because during node evacuation iallocator was only
7902
    called with an unmodified cluster model, not taking planned changes into
7903
    account.
7904

7905
    """
7906
    instance = self.instance
7907
    secondary_node = instance.secondary_nodes[0]
7908

    
7909
    if self.iallocator_name is None:
7910
      remote_node = self.remote_node
7911
    else:
7912
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
7913
                                       instance.name, instance.secondary_nodes)
7914

    
7915
    if remote_node is not None:
7916
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
7917
      assert self.remote_node_info is not None, \
7918
        "Cannot retrieve locked node %s" % remote_node
7919
    else:
7920
      self.remote_node_info = None
7921

    
7922
    if remote_node == self.instance.primary_node:
7923
      raise errors.OpPrereqError("The specified node is the primary node of"
7924
                                 " the instance.", errors.ECODE_INVAL)
7925

    
7926
    if remote_node == secondary_node:
7927
      raise errors.OpPrereqError("The specified node is already the"
7928
                                 " secondary node of the instance.",
7929
                                 errors.ECODE_INVAL)
7930

    
7931
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
7932
                                    constants.REPLACE_DISK_CHG):
7933
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
7934
                                 errors.ECODE_INVAL)
7935

    
7936
    if self.mode == constants.REPLACE_DISK_AUTO:
7937
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
7938
      faulty_secondary = self._FindFaultyDisks(secondary_node)
7939

    
7940
      if faulty_primary and faulty_secondary:
7941
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
7942
                                   " one node and can not be repaired"
7943
                                   " automatically" % self.instance_name,
7944
                                   errors.ECODE_STATE)
7945

    
7946
      if faulty_primary:
7947
        self.disks = faulty_primary
7948
        self.target_node = instance.primary_node
7949
        self.other_node = secondary_node
7950
        check_nodes = [self.target_node, self.other_node]
7951
      elif faulty_secondary:
7952
        self.disks = faulty_secondary
7953
        self.target_node = secondary_node
7954
        self.other_node = instance.primary_node
7955
        check_nodes = [self.target_node, self.other_node]
7956
      else:
7957
        self.disks = []
7958
        check_nodes = []
7959

    
7960
    else:
7961
      # Non-automatic modes
7962
      if self.mode == constants.REPLACE_DISK_PRI:
7963
        self.target_node = instance.primary_node
7964
        self.other_node = secondary_node
7965
        check_nodes = [self.target_node, self.other_node]
7966

    
7967
      elif self.mode == constants.REPLACE_DISK_SEC:
7968
        self.target_node = secondary_node
7969
        self.other_node = instance.primary_node
7970
        check_nodes = [self.target_node, self.other_node]
7971

    
7972
      elif self.mode == constants.REPLACE_DISK_CHG:
7973
        self.new_node = remote_node
7974
        self.other_node = instance.primary_node
7975
        self.target_node = secondary_node
7976
        check_nodes = [self.new_node, self.other_node]
7977

    
7978
        _CheckNodeNotDrained(self.lu, remote_node)
7979
        _CheckNodeVmCapable(self.lu, remote_node)
7980

    
7981
        old_node_info = self.cfg.GetNodeInfo(secondary_node)
7982
        assert old_node_info is not None
7983
        if old_node_info.offline and not self.early_release:
7984
          # doesn't make sense to delay the release
7985
          self.early_release = True
7986
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
7987
                          " early-release mode", secondary_node)
7988

    
7989
      else:
7990
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
7991
                                     self.mode)
7992

    
7993
      # If not specified all disks should be replaced
7994
      if not self.disks:
7995
        self.disks = range(len(self.instance.disks))
7996

    
7997
    for node in check_nodes:
7998
      _CheckNodeOnline(self.lu, node)
7999

    
8000
    # Check whether disks are valid
8001
    for disk_idx in self.disks:
8002
      instance.FindDisk(disk_idx)
8003

    
8004
    # Get secondary node IP addresses
8005
    node_2nd_ip = {}
8006

    
8007
    for node_name in [self.target_node, self.other_node, self.new_node]:
8008
      if node_name is not None:
8009
        node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
8010

    
8011
    self.node_secondary_ip = node_2nd_ip
8012

    
8013
  def Exec(self, feedback_fn):
8014
    """Execute disk replacement.
8015

8016
    This dispatches the disk replacement to the appropriate handler.
8017

8018
    """
8019
    if self.delay_iallocator:
8020
      self._CheckPrereq2()
8021

    
8022
    if not self.disks:
8023
      feedback_fn("No disks need replacement")
8024
      return
8025

    
8026
    feedback_fn("Replacing disk(s) %s for %s" %
8027
                (utils.CommaJoin(self.disks), self.instance.name))
8028

    
8029
    activate_disks = (not self.instance.admin_up)
8030

    
8031
    # Activate the instance disks if we're replacing them on a down instance
8032
    if activate_disks:
8033
      _StartInstanceDisks(self.lu, self.instance, True)
8034

    
8035
    try:
8036
      # Should we replace the secondary node?
8037
      if self.new_node is not None:
8038
        fn = self._ExecDrbd8Secondary
8039
      else:
8040
        fn = self._ExecDrbd8DiskOnly
8041

    
8042
      return fn(feedback_fn)
8043

    
8044
    finally:
8045
      # Deactivate the instance disks if we're replacing them on a
8046
      # down instance
8047
      if activate_disks:
8048
        _SafeShutdownInstanceDisks(self.lu, self.instance)
8049

    
8050
  def _CheckVolumeGroup(self, nodes):
8051
    self.lu.LogInfo("Checking volume groups")
8052

    
8053
    vgname = self.cfg.GetVGName()
8054

    
8055
    # Make sure volume group exists on all involved nodes
8056
    results = self.rpc.call_vg_list(nodes)
8057
    if not results:
8058
      raise errors.OpExecError("Can't list volume groups on the nodes")
8059

    
8060
    for node in nodes:
8061
      res = results[node]
8062
      res.Raise("Error checking node %s" % node)
8063
      if vgname not in res.payload:
8064
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
8065
                                 (vgname, node))
8066

    
8067
  def _CheckDisksExistence(self, nodes):
8068
    # Check disk existence
8069
    for idx, dev in enumerate(self.instance.disks):
8070
      if idx not in self.disks:
8071
        continue
8072

    
8073
      for node in nodes:
8074
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
8075
        self.cfg.SetDiskID(dev, node)
8076

    
8077
        result = self.rpc.call_blockdev_find(node, dev)
8078

    
8079
        msg = result.fail_msg
8080
        if msg or not result.payload:
8081
          if not msg:
8082
            msg = "disk not found"
8083
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
8084
                                   (idx, node, msg))
8085

    
8086
  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
8087
    for idx, dev in enumerate(self.instance.disks):
8088
      if idx not in self.disks:
8089
        continue
8090

    
8091
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
8092
                      (idx, node_name))
8093

    
8094
      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
8095
                                   ldisk=ldisk):
8096
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
8097
                                 " replace disks for instance %s" %
8098
                                 (node_name, self.instance.name))
8099

    
8100
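  # Added note: _CreateNewStorage builds, per replaced disk, a data LV of
  # the disk's size and a 128 MiB metadata LV, named via
  # _GenerateUniqueNames with suffixes like ".disk0_data"/".disk0_meta"
  # (illustrative; the prefix is a generated unique id).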
  def _CreateNewStorage(self, node_name):
8101
    vgname = self.cfg.GetVGName()
8102
    iv_names = {}
8103

    
8104
    for idx, dev in enumerate(self.instance.disks):
8105
      if idx not in self.disks:
8106
        continue
8107

    
8108
      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
8109

    
8110
      self.cfg.SetDiskID(dev, node_name)
8111

    
8112
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
8113
      names = _GenerateUniqueNames(self.lu, lv_names)
8114

    
8115
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
8116
                             logical_id=(vgname, names[0]))
8117
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
8118
                             logical_id=(vgname, names[1]))
8119

    
8120
      new_lvs = [lv_data, lv_meta]
8121
      old_lvs = dev.children
8122
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
8123

    
8124
      # we pass force_create=True to force the LVM creation
8125
      for new_lv in new_lvs:
8126
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
8127
                        _GetInstanceInfoText(self.instance), False)
8128

    
8129
    return iv_names
8130

    
8131
  def _CheckDevices(self, node_name, iv_names):
8132
    for name, (dev, _, _) in iv_names.iteritems():
8133
      self.cfg.SetDiskID(dev, node_name)
8134

    
8135
      result = self.rpc.call_blockdev_find(node_name, dev)
8136

    
8137
      msg = result.fail_msg
8138
      if msg or not result.payload:
8139
        if not msg:
8140
          msg = "disk not found"
8141
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
8142
                                 (name, msg))
8143

    
8144
      if result.payload.is_degraded:
8145
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
8146

    
8147
  def _RemoveOldStorage(self, node_name, iv_names):
8148
    for name, (_, old_lvs, _) in iv_names.iteritems():
8149
      self.lu.LogInfo("Remove logical volumes for %s" % name)
8150

    
8151
      for lv in old_lvs:
8152
        self.cfg.SetDiskID(lv, node_name)
8153

    
8154
        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
8155
        if msg:
8156
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
8157
                             hint="remove unused LVs manually")
8158

    
8159
  def _ReleaseNodeLock(self, node_name):
8160
    """Releases the lock for a given node."""
8161
    self.lu.context.glm.release(locking.LEVEL_NODE, node_name)
8162

    
8163
  def _ExecDrbd8DiskOnly(self, feedback_fn):
8164
    """Replace a disk on the primary or secondary for DRBD 8.
8165

8166
    The algorithm for replace is quite complicated:
8167

8168
      1. for each disk to be replaced:
8169

8170
        1. create new LVs on the target node with unique names
8171
        1. detach old LVs from the drbd device
8172
        1. rename old LVs to name_replaced.<time_t>
8173
        1. rename new LVs to old LVs
8174
        1. attach the new LVs (with the old names now) to the drbd device
8175

8176
      1. wait for sync across all devices
8177

8178
      1. for each modified disk:
8179

8180
        1. remove old LVs (which have the name name_replaces.<time_t>)
8181

8182
    Failures are not very well handled.
8183

8184
    """
8185
    steps_total = 6
8186

    
8187
    # Step: check device activation
8188
    self.lu.LogStep(1, steps_total, "Check device existence")
8189
    self._CheckDisksExistence([self.other_node, self.target_node])
8190
    self._CheckVolumeGroup([self.target_node, self.other_node])
8191

    
8192
    # Step: check other node consistency
8193
    self.lu.LogStep(2, steps_total, "Check peer consistency")
8194
    self._CheckDisksConsistency(self.other_node,
8195
                                self.other_node == self.instance.primary_node,
8196
                                False)
8197

    
8198
    # Step: create new storage
8199
    self.lu.LogStep(3, steps_total, "Allocate new storage")
8200
    iv_names = self._CreateNewStorage(self.target_node)
8201

    
8202
    # Step: for each lv, detach+rename*2+attach
8203
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
8204
    for dev, old_lvs, new_lvs in iv_names.itervalues():
8205
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
8206

    
8207
      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
8208
                                                     old_lvs)
8209
      result.Raise("Can't detach drbd from local storage on node"
8210
                   " %s for device %s" % (self.target_node, dev.iv_name))
8211
      #dev.children = []
8212
      #cfg.Update(instance)
8213

    
8214
      # ok, we created the new LVs, so now we know we have the needed
8215
      # storage; as such, we proceed on the target node to rename
8216
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
8217
      # using the assumption that logical_id == physical_id (which in
8218
      # turn is the unique_id on that node)
8219

    
8220
      # FIXME(iustin): use a better name for the replaced LVs
8221
      temp_suffix = int(time.time())
8222
      ren_fn = lambda d, suff: (d.physical_id[0],
8223
                                d.physical_id[1] + "_replaced-%s" % suff)
8224

      # Build the rename list based on what LVs exist on the node
      rename_old_to_new = []
      for to_ren in old_lvs:
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
        if not result.fail_msg and result.payload:
          # device exists
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))

      self.lu.LogInfo("Renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_old_to_new)
      result.Raise("Can't rename old LVs on node %s" % self.target_node)

      # Now we rename the new LVs to the old LVs
      self.lu.LogInfo("Renaming the new LVs on the target node")
      rename_new_to_old = [(new, old.physical_id)
                           for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_new_to_old)
      result.Raise("Can't rename new LVs on node %s" % self.target_node)

      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        self.cfg.SetDiskID(new, self.target_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        self.cfg.SetDiskID(disk, self.target_node)

      # Now that the new lvs have the old name, we can add them to the device
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
                                                  new_lvs)
      msg = result.fail_msg
      if msg:
        for new_lv in new_lvs:
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
                                               new_lv).fail_msg
          if msg2:
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
                               hint=("cleanup manually the unused logical"
                                     " volumes"))
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)

      dev.children = new_lvs

      self.cfg.Update(self.instance, feedback_fn)

    cstep = 5
    if self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)
      # WARNING: we release both node locks here, do not do other RPCs
      # than WaitForSync to the primary node
      self._ReleaseNodeLock([self.target_node, self.other_node])

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep, steps_total, "Sync devices")
    cstep += 1
    _WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)

  def _ExecDrbd8Secondary(self, feedback_fn):
    """Replace the secondary node for DRBD 8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.instance.primary_node])
    self._CheckVolumeGroup([self.instance.primary_node])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(self.instance.primary_node, True, True)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
                      (self.new_node, idx))
      # we pass force_create=True to force LVM creation
      for new_lv in dev.children:
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
                        _GetInstanceInfoText(self.instance), False)

    # Step 4: drbd minors and drbd setup changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    minors = self.cfg.AllocateDRBDMinor([self.new_node
                                         for dev in self.instance.disks],
                                        self.instance.name)
    logging.debug("Allocated minors %r", minors)
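
    # AllocateDRBDMinor was given one entry (the new node) per instance disk,
    # so it returns one new minor per disk; zip() below pairs each disk with
    # its freshly allocated minor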

    iv_names = {}
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
                      (self.new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the latter activation in step 4
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if self.instance.primary_node == o_node1:
        p_minor = o_minor1
      else:
        assert self.instance.primary_node == o_node2, "Three-node instance?"
        p_minor = o_minor2

      new_alone_id = (self.instance.primary_node, self.new_node, None,
                      p_minor, new_minor, o_secret)
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
                    p_minor, new_minor, o_secret)

      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children,
                              size=dev.size)
      try:
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
                              _GetInstanceInfoText(self.instance), False)
      except errors.GenericError:
        self.cfg.ReleaseDRBDMinors(self.instance.name)
        raise

    # We have new devices, shutdown the drbd on the old secondary
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
      self.cfg.SetDiskID(dev, self.target_node)
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
      if msg:
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
                           " node: %s" % (idx, msg),
                           hint=("Please cleanup this device manually as"
                                 " soon as possible"))

    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
                                               self.node_secondary_ip,
                                               self.instance.disks)\
                                              [self.instance.primary_node]

    msg = result.fail_msg
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(self.instance.name)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    self.lu.LogInfo("Updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      self.cfg.SetDiskID(dev, self.instance.primary_node)

    self.cfg.Update(self.instance, feedback_fn)

    # and now perform the drbd attach
    self.lu.LogInfo("Attaching primary drbds to new secondary"
                    " (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                            self.new_node],
                                           self.node_secondary_ip,
                                           self.instance.disks,
                                           self.instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
                           to_node, msg,
                           hint=("please do a gnt-instance info to see the"
                                 " status of disks"))
    cstep = 5
    if self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)
      # WARNING: we release all node locks here, do not do other RPCs
      # than WaitForSync to the primary node
      self._ReleaseNodeLock([self.instance.primary_node,
                             self.target_node,
                             self.new_node])

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep, steps_total, "Sync devices")
    cstep += 1
    _WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      self._RemoveOldStorage(self.target_node, iv_names)


class LURepairNodeStorage(NoHooksLU):
  """Repairs the volume group on a node.

  """
  _OP_PARAMS = [
    _PNodeName,
    ("storage_type", ht.NoDefault, _CheckStorageType),
    ("name", ht.NoDefault, ht.TNonEmptyString),
    ("ignore_consistency", False, ht.TBool),
    ]
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type

    if (constants.SO_FIX_CONSISTENCY not in
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
      raise errors.OpPrereqError("Storage units of type '%s' cannot be"
                                 " repaired" % storage_type,
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

  def _CheckFaultyDisks(self, instance, node_name):
    """Ensure faulty disks abort the opcode or at least warn."""
    try:
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
                                  node_name, True):
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
                                   " node '%s'" % (instance.name, node_name),
                                   errors.ECODE_STATE)
    except errors.OpPrereqError, err:
      if self.op.ignore_consistency:
        self.proc.LogWarning(str(err.args[0]))
      else:
        raise

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # Check whether any instance on this node has faulty disks
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
      if not inst.admin_up:
        continue
      check_nodes = set(inst.all_nodes)
      check_nodes.discard(self.op.node_name)
      for inst_node_name in check_nodes:
        self._CheckFaultyDisks(inst, inst_node_name)

  def Exec(self, feedback_fn):
    feedback_fn("Repairing storage unit '%s' on %s ..." %
                (self.op.name, self.op.node_name))

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_execute(self.op.node_name,
                                           self.op.storage_type, st_args,
                                           self.op.name,
                                           constants.SO_FIX_CONSISTENCY)
    result.Raise("Failed to repair storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))


class LUNodeEvacuationStrategy(NoHooksLU):
  """Computes the node evacuation strategy.

  """
  _OP_PARAMS = [
    ("nodes", ht.NoDefault, ht.TListOf(ht.TNonEmptyString)),
    ("remote_node", None, ht.TMaybeString),
    ("iallocator", None, ht.TMaybeString),
    ]
  REQ_BGL = False

  def CheckArguments(self):
    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")

  def ExpandNames(self):
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
    self.needed_locks = locks = {}
    if self.op.remote_node is None:
      locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
      locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]

  def Exec(self, feedback_fn):
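    # When an explicit remote_node is given, the result is built directly as
    # [instance_name, remote_node] pairs; otherwise it is taken verbatim from
    # an iallocator run in multi-evacuate (MEVAC) mode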
    if self.op.remote_node is not None:
      instances = []
      for node in self.op.nodes:
        instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
      result = []
      for i in instances:
        if i.primary_node == self.op.remote_node:
          raise errors.OpPrereqError("Node %s is the primary node of"
                                     " instance %s, cannot use it as"
                                     " secondary" %
                                     (self.op.remote_node, i.name),
                                     errors.ECODE_INVAL)
        result.append([i.name, self.op.remote_node])
    else:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=constants.IALLOCATOR_MODE_MEVAC,
                       evac_nodes=self.op.nodes)
      ial.Run(self.op.iallocator, validate=True)
      if not ial.success:
        raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
                                 errors.ECODE_NORES)
      result = ial.result
    return result


class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_PARAMS = [
    _PInstanceName,
    ("disk", ht.NoDefault, ht.TInt),
    ("amount", ht.NoDefault, ht.TInt),
    ("wait_for_sync", True, ht.TBool),
    ]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)

    self.instance = instance

    if instance.disk_template not in constants.DTS_GROWABLE:
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.", errors.ECODE_INVAL)

    self.disk = instance.FindDisk(self.op.disk)

    if instance.disk_template != constants.DT_FILE:
      # TODO: check the free disk space for file, when that feature is
      # supported
      _CheckNodesFreeDisk(self, nodenames, self.op.amount)

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = self.disk

    disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block device to grow")
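
    # The grow RPC is sent to every node the instance uses (primary and
    # secondaries), so that for mirrored templates such as DRBD both sides of
    # the mirror are resized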

    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      result.Raise("Grow request failed to node %s" % node)

      # TODO: Rewrite code to work properly
      # DRBD goes into sync mode for a short amount of time after executing the
      # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
      # calling "resize" in sync mode fails. Sleeping for a short amount of
      # time is a work-around.
      time.sleep(5)

    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance, feedback_fn)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance, disks=[disk])
      if disk_abort:
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")
      if not instance.admin_up:
        _SafeShutdownInstanceDisks(self, instance, disks=[disk])
    elif not instance.admin_up:
      self.proc.LogWarning("Not shutting down the disk even if the instance is"
                           " not supposed to be running because no wait for"
                           " sync mode was requested.")


class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_PARAMS = [
    ("instances", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
    ("static", False, ht.TBool),
    ]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = _ExpandInstanceName(self.cfg, name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]

  def _ComputeBlockdevStatus(self, node, instance_name, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node:
      return None

    self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_find(node, dev)
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance_name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    """
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]
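
    # For DRBD devices, logical_id[0] and logical_id[1] are the two nodes of
    # the mirror, so whichever one is not the primary is used as the secondary
    # above; pstatus below is queried on the primary node, sstatus on snode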

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance.name, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise("Error checking node %s" % instance.primary_node)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": _NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

      result[instance.name] = idict

    return result


class LUSetInstanceParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_PARAMS = [
    _PInstanceName,
    ("nics", ht.EmptyList, ht.TList),
    ("disks", ht.EmptyList, ht.TList),
    ("beparams", ht.EmptyDict, ht.TDict),
    ("hvparams", ht.EmptyDict, ht.TDict),
    ("disk_template", None, ht.TMaybeString),
    ("remote_node", None, ht.TMaybeString),
    ("os_name", None, ht.TMaybeString),
    ("force_variant", False, ht.TBool),
    ("osparams", None, ht.TOr(ht.TDict, ht.TNone)),
    _PForce,
    ]
  REQ_BGL = False

  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      _CheckGlobalHvParams(self.op.hvparams)

    # Disk validation
    disk_addremove = 0
    for disk_op, disk_dict in self.op.disks:
      utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
      if disk_op == constants.DDM_REMOVE:
        disk_addremove += 1
        continue
      elif disk_op == constants.DDM_ADD:
        disk_addremove += 1
      else:
        if not isinstance(disk_op, int):
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
        if not isinstance(disk_dict, dict):
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

      if disk_op == constants.DDM_ADD:
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
        if mode not in constants.DISK_ACCESS_SET:
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                     errors.ECODE_INVAL)
        size = disk_dict.get('size', None)
        if size is None:
          raise errors.OpPrereqError("Required disk parameter size missing",
                                     errors.ECODE_INVAL)
        try:
          size = int(size)
        except (TypeError, ValueError), err:
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
                                     str(err), errors.ECODE_INVAL)
        disk_dict['size'] = size
      else:
        # modification of disk
        if 'size' in disk_dict:
          raise errors.OpPrereqError("Disk size change not possible, use"
                                     " grow-disk", errors.ECODE_INVAL)

    if disk_addremove > 1:
      raise errors.OpPrereqError("Only one disk add or remove operation"
                                 " supported at a time", errors.ECODE_INVAL)

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if self.op.disk_template:
      _CheckDiskTemplate(self.op.disk_template)
      if (self.op.disk_template in constants.DTS_NET_MIRROR and
          self.op.remote_node is None):
        raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                   " one requires specifying a secondary node",
                                   errors.ECODE_INVAL)

    # NIC validation
    nic_addremove = 0
    for nic_op, nic_dict in self.op.nics:
      utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
      if nic_op == constants.DDM_REMOVE:
        nic_addremove += 1
        continue
      elif nic_op == constants.DDM_ADD:
        nic_addremove += 1
      else:
        if not isinstance(nic_op, int):
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
        if not isinstance(nic_dict, dict):
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

      # nic_dict should be a dict
      nic_ip = nic_dict.get('ip', None)
      if nic_ip is not None:
        if nic_ip.lower() == constants.VALUE_NONE:
          nic_dict['ip'] = None
        else:
          if not netutils.IPAddress.IsValid(nic_ip):
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
                                       errors.ECODE_INVAL)

      nic_bridge = nic_dict.get('bridge', None)
      nic_link = nic_dict.get('link', None)
      if nic_bridge and nic_link:
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
                                   " at the same time", errors.ECODE_INVAL)
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
        nic_dict['bridge'] = None
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
        nic_dict['link'] = None

      if nic_op == constants.DDM_ADD:
        nic_mac = nic_dict.get('mac', None)
        if nic_mac is None:
          nic_dict['mac'] = constants.VALUE_AUTO

      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          nic_mac = utils.NormalizeAndValidateMac(nic_mac)

        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing nic",
                                     errors.ECODE_INVAL)

    if nic_addremove > 1:
      raise errors.OpPrereqError("Only one NIC add or remove operation"
                                 " supported at a time", errors.ECODE_INVAL)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = dict()
    if constants.BE_MEMORY in self.be_new:
      args['memory'] = self.be_new[constants.BE_MEMORY]
    if constants.BE_VCPUS in self.be_new:
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.
    if self.op.nics:
      args['nics'] = []
      nic_override = dict(self.op.nics)
      for idx, nic in enumerate(self.instance.nics):
        if idx in nic_override:
          this_nic_override = nic_override[idx]
        else:
          this_nic_override = {}
        if 'ip' in this_nic_override:
          ip = this_nic_override['ip']
        else:
          ip = nic.ip
        if 'mac' in this_nic_override:
          mac = this_nic_override['mac']
        else:
          mac = nic.mac
        if idx in self.nic_pnew:
          nicparams = self.nic_pnew[idx]
        else:
          nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
        mode = nicparams[constants.NIC_MODE]
        link = nicparams[constants.NIC_LINK]
        args['nics'].append((ip, mac, mode, link))
      if constants.DDM_ADD in nic_override:
        ip = nic_override[constants.DDM_ADD].get('ip', None)
        mac = nic_override[constants.DDM_ADD]['mac']
        nicparams = self.nic_pnew[constants.DDM_ADD]
        mode = nicparams[constants.NIC_MODE]
        link = nicparams[constants.NIC_LINK]
        args['nics'].append((ip, mac, mode, link))
      elif constants.DDM_REMOVE in nic_override:
        del args['nics'][-1]

    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    # checking the new params on the primary/secondary nodes

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    cluster = self.cluster = self.cfg.GetClusterInfo()
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    pnode = instance.primary_node
    nodelist = list(instance.all_nodes)

    # OS change
    if self.op.os_name and not self.op.force:
      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
                      self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = instance.os

    if self.op.disk_template:
      if instance.disk_template == self.op.disk_template:
        raise errors.OpPrereqError("Instance already has disk template %s" %
                                   instance.disk_template, errors.ECODE_INVAL)

      if (instance.disk_template,
          self.op.disk_template) not in self._DISK_CONVERSIONS:
        raise errors.OpPrereqError("Unsupported disk template conversion from"
                                   " %s to %s" % (instance.disk_template,
                                                  self.op.disk_template),
                                   errors.ECODE_INVAL)
      _CheckInstanceDown(self, instance, "cannot change disk template")
      if self.op.disk_template in constants.DTS_NET_MIRROR:
        if self.op.remote_node == pnode:
          raise errors.OpPrereqError("Given new secondary node %s is the same"
                                     " as the primary node of the instance" %
                                     self.op.remote_node, errors.ECODE_STATE)
        _CheckNodeOnline(self, self.op.remote_node)
        _CheckNodeNotDrained(self, self.op.remote_node)
        disks = [{"size": d.size} for d in instance.disks]
        required = _ComputeDiskSize(self.op.disk_template, disks)
        _CheckNodesFreeDisk(self, [self.op.remote_node], required)

    # hvparams processing
    if self.op.hvparams:
      hv_type = instance.hypervisor
      i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
                                   use_none=True)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = cluster.SimpleFillBE(i_bedict)
      self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}

    # osparams processing
    if self.op.osparams:
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    self.warn = []

    if constants.BE_MEMORY in self.op.beparams and not self.op.force:
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
                                         instance.hypervisor)
      pninfo = nodeinfo[pnode]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (pnode, msg))
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
        self.warn.append("Node data from primary node %s doesn't contain"
                         " free memory information" % pnode)
      elif instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                        instance_info.fail_msg)
      else:
        if instance_info.payload:
          current_mem = int(instance_info.payload['memory'])
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
                    pninfo.payload['memory_free'])
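        # miss_mem: how much memory would still be lacking on the primary node
        # if the instance were started with the new setting, i.e. the new
        # requirement minus what the instance currently uses minus what the
        # node reports as free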
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem,
                                     errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.items():
          if node not in instance.secondary_nodes:
            continue
          msg = nres.fail_msg
          if msg:
            self.warn.append("Can't get info from secondary node %s: %s" %
                             (node, msg))
          elif not isinstance(nres.payload.get('memory_free', None), int):
            self.warn.append("Secondary node %s didn't return free"
                             " memory information" % node)
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
            self.warn.append("Not enough memory to failover instance to"
                             " secondary node %s" % node)

    # NIC processing
    self.nic_pnew = {}
    self.nic_pinst = {}
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        if not instance.nics:
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
                                     errors.ECODE_INVAL)
        continue
      if nic_op != constants.DDM_ADD:
        # an existing nic
        if not instance.nics:
          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
                                     " no NICs" % nic_op,
                                     errors.ECODE_INVAL)
        if nic_op < 0 or nic_op >= len(instance.nics):
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
                                     " are 0 to %d" %
                                     (nic_op, len(instance.nics) - 1),
                                     errors.ECODE_INVAL)
        old_nic_params = instance.nics[nic_op].nicparams
        old_nic_ip = instance.nics[nic_op].ip
      else:
        old_nic_params = {}
        old_nic_ip = None

      update_params_dict = dict([(key, nic_dict[key])
                                 for key in constants.NICS_PARAMETERS
                                 if key in nic_dict])

      if 'bridge' in nic_dict:
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']

      new_nic_params = _GetUpdatedParams(old_nic_params,
                                         update_params_dict)
      utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
      new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
      self.nic_pinst[nic_op] = new_nic_params
      self.nic_pnew[nic_op] = new_filled_nic_params
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]

      if new_nic_mode == constants.NIC_MODE_BRIDGED:
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
        if msg:
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
          if self.op.force:
            self.warn.append(msg)
          else:
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
      if new_nic_mode == constants.NIC_MODE_ROUTED:
        if 'ip' in nic_dict:
          nic_ip = nic_dict['ip']
        else:
          nic_ip = old_nic_ip
        if nic_ip is None:
          raise errors.OpPrereqError('Cannot set the nic ip to None'
                                     ' on a routed nic', errors.ECODE_INVAL)
      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac is None:
          raise errors.OpPrereqError('Cannot set the nic mac to None',
                                     errors.ECODE_INVAL)
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          # otherwise generate the mac
          nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
        else:
          # or validate/reserve the current one
          try:
            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
          except errors.ReservationError:
            raise errors.OpPrereqError("MAC address %s already in use"
                                       " in cluster" % nic_mac,
                                       errors.ECODE_NOTUNIQUE)

    # DISK processing
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances",
                                 errors.ECODE_INVAL)
    for disk_op, _ in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        if len(instance.disks) == 1:
          raise errors.OpPrereqError("Cannot remove the last disk of"
                                     " an instance", errors.ECODE_INVAL)
        _CheckInstanceDown(self, instance, "cannot remove disks")

      if (disk_op == constants.DDM_ADD and
          len(instance.disks) >= constants.MAX_DISKS):
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
                                   " add more" % constants.MAX_DISKS,
                                   errors.ECODE_STATE)
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
        # an existing disk
        if disk_op < 0 or disk_op >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
                                     " are 0 to %d" %
                                     (disk_op, len(instance.disks) - 1),
                                     errors.ECODE_INVAL)

    return

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    instance = self.instance
    pnode = instance.primary_node
    snode = self.op.remote_node

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{"size": d.size, "mode": d.mode} for d in instance.disks]
    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
                                      instance.name, pnode, [snode],
                                      disk_info, None, None, 0)
    info = _GetInstanceInfoText(instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in new_disks:
      # unfortunately this is... not too nice
      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
                            info, True)
      for child in disk.children:
        _CreateSingleBlockDev(self, snode, instance, child, info, True)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    for disk in new_disks:
      for node in [pnode, snode]:
        f_create = node == pnode
        _CreateSingleBlockDev(self, node, instance, disk, info, f_create)

    # at this point, the instance has been modified
    instance.disk_template = constants.DT_DRBD8
    instance.disks = new_disks
    self.cfg.Update(instance, feedback_fn)

    # disks are created, waiting for sync
    disk_abort = not _WaitForSync(self, instance)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    instance = self.instance
    assert len(instance.secondary_nodes) == 1
    pnode = instance.primary_node
    snode = instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = instance.disks
    new_disks = [d.children[0] for d in old_disks]
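
    # for a DRBD8 disk, children[0] is the data LV and children[1] the
    # metadata LV; only the data LV survives as the new plain disk, the
    # metadata LVs are removed further down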

    # copy over size and mode
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode

    # update instance structure
    instance.disks = new_disks
    instance.disk_template = constants.DT_PLAIN
    self.cfg.Update(instance, feedback_fn)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode)
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name, snode, msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode)
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx, pnode, msg)


  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    result = []
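    # result collects (parameter, new value) pairs for every change that was
    # actually applied; it is returned as the LU's result at the end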
    instance = self.instance
    # disk changes
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        # remove the last disk
        device = instance.disks.pop()
        device_idx = len(instance.disks)
        for node, disk in device.ComputeNodeTree(instance.primary_node):
          self.cfg.SetDiskID(disk, node)
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
          if msg:
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
                            " continuing anyway", device_idx, node, msg)
        result.append(("disk/%d" % device_idx, "remove"))
      elif disk_op == constants.DDM_ADD:
        # add a new disk
        if instance.disk_template == constants.DT_FILE:
          file_driver, file_path = instance.disks[0].logical_id
          file_path = os.path.dirname(file_path)
        else:
          file_driver = file_path = None
        disk_idx_base = len(instance.disks)
        new_disk = _GenerateDiskTemplate(self,
                                         instance.disk_template,
                                         instance.name, instance.primary_node,
                                         instance.secondary_nodes,
                                         [disk_dict],
                                         file_path,
                                         file_driver,
                                         disk_idx_base)[0]
        instance.disks.append(new_disk)
        info = _GetInstanceInfoText(instance)

        logging.info("Creating volume %s for instance %s",
                     new_disk.iv_name, instance.name)
        # Note: this needs to be kept in sync with _CreateDisks
        #HARDCODE
        for node in instance.all_nodes:
          f_create = node == instance.primary_node
          try:
            _CreateBlockDev(self, node, instance, new_disk,
                            f_create, info, f_create)
          except errors.OpExecError, err:
            self.LogWarning("Failed to create volume %s (%s) on"
                            " node %s: %s",
                            new_disk.iv_name, new_disk, node, err)
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
                       (new_disk.size, new_disk.mode)))
      else:
        # change a given disk
        instance.disks[disk_op].mode = disk_dict['mode']
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))

    if self.op.disk_template:
      r_shut = _ShutdownInstanceDisks(self, instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise
      result.append(("disk_template", self.op.disk_template))

    # NIC changes
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        # remove the last nic
        del instance.nics[-1]
        result.append(("nic.%d" % len(instance.nics), "remove"))
      elif nic_op == constants.DDM_ADD:
        # mac and bridge should be set, by now
        mac = nic_dict['mac']
        ip = nic_dict.get('ip', None)
        nicparams = self.nic_pinst[constants.DDM_ADD]
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
        instance.nics.append(new_nic)
        result.append(("nic.%d" % (len(instance.nics) - 1),
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
                       (new_nic.mac, new_nic.ip,
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
                       )))
      else:
        for key in 'mac', 'ip':
          if key in nic_dict:
            setattr(instance.nics[nic_op], key, nic_dict[key])
        if nic_op in self.nic_pinst:
          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
        for key, val in nic_dict.iteritems():
          result.append(("nic.%s/%d" % (key, nic_op), val))

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    self.cfg.Update(instance, feedback_fn)

    return result

  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_PARAMS = [
    ("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
    ("use_locking", False, ht.TBool),
    ]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
    rpcresult = self.rpc.call_export_list(self.nodes)
    result = {}
    for node in rpcresult:
      if rpcresult[node].fail_msg:
        result[node] = False
      else:
        result[node] = rpcresult[node].payload

    return result


class LUPrepareExport(NoHooksLU):
  """Prepares an instance for an export and returns useful information.

  """
  _OP_PARAMS = [
    _PInstanceName,
    ("mode", ht.NoDefault, ht.TElemOf(constants.EXPORT_MODES)),
    ]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    instance_name = self.op.instance_name

    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self._cds = _GetClusterDomainSecret()

  def Exec(self, feedback_fn):
    """Prepares an instance for an export.

    """
    instance = self.instance

    if self.op.mode == constants.EXPORT_MODE_REMOTE:
      salt = utils.GenerateSecret(8)

      feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
      result = self.rpc.call_x509_cert_create(instance.primary_node,
                                              constants.RIE_CERT_VALIDITY)
      result.Raise("Can't create X509 key and certificate on %s" % result.node)

      (name, cert_pem) = result.payload

      cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                             cert_pem)

      return {
        "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
        "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
                          salt),
        "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
        }

    return None


class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_PARAMS = [
    _PInstanceName,
    ("target_node", ht.NoDefault, ht.TOr(ht.TNonEmptyString, ht.TList)),
    ("shutdown", True, ht.TBool),
    _PShutdownTimeout,
    ("remove_instance", False, ht.TBool),
    ("ignore_remove_failures", False, ht.TBool),
    ("mode", constants.EXPORT_MODE_LOCAL, ht.TElemOf(constants.EXPORT_MODES)),
    ("x509_key_name", None, ht.TOr(ht.TList, ht.TNone)),
    ("destination_x509_ca", None, ht.TMaybeString),
    ]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.x509_key_name = self.op.x509_key_name
    self.dest_x509_ca_pem = self.op.destination_x509_ca

    if self.op.mode == constants.EXPORT_MODE_REMOTE:
      if not self.x509_key_name:
        raise errors.OpPrereqError("Missing X509 key name for encryption",
                                   errors.ECODE_INVAL)

      if not self.dest_x509_ca_pem:
        raise errors.OpPrereqError("Missing destination X509 CA",
                                   errors.ECODE_INVAL)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    # Lock all nodes for local exports
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
      # FIXME: lock only instance primary and destination node
      #
      # Sad but true, for now we have to lock all nodes, as we don't know where
      # the previous export might be, and in this LU we search for it and
      # remove it from its current node. In the future we could fix this by:
      #  - making a tasklet to search (share-lock all), then create the
      #    new one, then one to remove, after
      #  - removing the removal operation altogether
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
9650
    """Build hooks env.
9651

9652
    This will run on the master, primary node and target node.
9653

9654
    """
9655
    env = {
9656
      "EXPORT_MODE": self.op.mode,
9657
      "EXPORT_NODE": self.op.target_node,
9658
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
9659
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
9660
      # TODO: Generic function for boolean env variables
9661
      "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
9662
      }
9663

    
9664
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
9665

    
9666
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
9667

    
9668
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
9669
      nl.append(self.op.target_node)
9670

    
9671
    return env, nl, nl
9672

    
9673
  def CheckPrereq(self):
9674
    """Check prerequisites.
9675

9676
    This checks that the instance and node names are valid.
9677

9678
    """
9679
    instance_name = self.op.instance_name
9680

    
9681
    self.instance = self.cfg.GetInstanceInfo(instance_name)
9682
    assert self.instance is not None, \
9683
          "Cannot retrieve locked instance %s" % self.op.instance_name
9684
    _CheckNodeOnline(self, self.instance.primary_node)
9685

    
9686
    if (self.op.remove_instance and self.instance.admin_up and
9687
        not self.op.shutdown):
9688
      raise errors.OpPrereqError("Cannot remove instance without shutting it"
                                 " down first")
9690

    
9691
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
9692
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
9693
      self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
9694
      assert self.dst_node is not None
9695

    
9696
      _CheckNodeOnline(self, self.dst_node.name)
9697
      _CheckNodeNotDrained(self, self.dst_node.name)
9698

    
9699
      self._cds = None
9700
      self.dest_disk_info = None
9701
      self.dest_x509_ca = None
9702

    
9703
    elif self.op.mode == constants.EXPORT_MODE_REMOTE:
9704
      self.dst_node = None
9705

    
9706
      if len(self.op.target_node) != len(self.instance.disks):
9707
        raise errors.OpPrereqError(("Received destination information for %s"
9708
                                    " disks, but instance %s has %s disks") %
9709
                                   (len(self.op.target_node), instance_name,
9710
                                    len(self.instance.disks)),
9711
                                   errors.ECODE_INVAL)
9712

    
9713
      cds = _GetClusterDomainSecret()
9714

    
9715
      # Check X509 key name
9716
      try:
9717
        (key_name, hmac_digest, hmac_salt) = self.x509_key_name
9718
      except (TypeError, ValueError), err:
9719
        raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)
9720

    
9721
      if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
9722
        raise errors.OpPrereqError("HMAC for X509 key name is wrong",
9723
                                   errors.ECODE_INVAL)
9724

    
9725
      # Load and verify CA
9726
      try:
9727
        (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
9728
      except OpenSSL.crypto.Error, err:
9729
        raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
9730
                                   (err, ), errors.ECODE_INVAL)
9731

    
9732
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
9733
      if errcode is not None:
9734
        raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
9735
                                   (msg, ), errors.ECODE_INVAL)
9736

    
9737
      self.dest_x509_ca = cert
9738

    
9739
      # Verify target information
9740
      disk_info = []
9741
      for idx, disk_data in enumerate(self.op.target_node):
9742
        try:
9743
          (host, port, magic) = \
9744
            masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
9745
        except errors.GenericError, err:
9746
          raise errors.OpPrereqError("Target info for disk %s: %s" %
9747
                                     (idx, err), errors.ECODE_INVAL)
9748

    
9749
        disk_info.append((host, port, magic))
9750

    
9751
      assert len(disk_info) == len(self.op.target_node)
9752
      self.dest_disk_info = disk_info
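      # dest_disk_info now holds one (host, port, magic) tuple per instance
      # disk, in disk order; it is later handed to helper.RemoteExport() in
      # Exec so that each exported disk is sent to its matching destination.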
9753

    
9754
    else:
9755
      raise errors.ProgrammerError("Unhandled export mode %r" %
9756
                                   self.op.mode)
9757

    
9758
    # instance disk type verification
9759
    # TODO: Implement export support for file-based disks
9760
    for disk in self.instance.disks:
9761
      if disk.dev_type == constants.LD_FILE:
9762
        raise errors.OpPrereqError("Export not supported for instances with"
9763
                                   " file-based disks", errors.ECODE_INVAL)
9764

    
9765
  def _CleanupExports(self, feedback_fn):
9766
    """Removes exports of current instance from all other nodes.
9767

9768
    If an instance in a cluster with nodes A..D was exported to node C, its
9769
    exports will be removed from the nodes A, B and D.
9770

9771
    """
9772
    assert self.op.mode != constants.EXPORT_MODE_REMOTE
9773

    
9774
    nodelist = self.cfg.GetNodeList()
9775
    nodelist.remove(self.dst_node.name)
9776

    
9777
    # on one-node clusters nodelist will be empty after the removal;
    # if we proceed, the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
9780
    iname = self.instance.name
9781
    if nodelist:
9782
      feedback_fn("Removing old exports for instance %s" % iname)
9783
      exportlist = self.rpc.call_export_list(nodelist)
9784
      for node in exportlist:
9785
        if exportlist[node].fail_msg:
9786
          continue
9787
        if iname in exportlist[node].payload:
9788
          msg = self.rpc.call_export_remove(node, iname).fail_msg
9789
          if msg:
9790
            self.LogWarning("Could not remove older export for instance %s"
9791
                            " on node %s: %s", iname, node, msg)
9792

    
9793
  def Exec(self, feedback_fn):
9794
    """Export an instance to an image in the cluster.
9795

9796
    """
9797
    assert self.op.mode in constants.EXPORT_MODES
9798

    
9799
    instance = self.instance
9800
    src_node = instance.primary_node
9801

    
9802
    if self.op.shutdown:
9803
      # shutdown the instance, but not the disks
9804
      feedback_fn("Shutting down instance %s" % instance.name)
9805
      result = self.rpc.call_instance_shutdown(src_node, instance,
9806
                                               self.op.shutdown_timeout)
9807
      # TODO: Maybe ignore failures if ignore_remove_failures is set
9808
      result.Raise("Could not shutdown instance %s on"
9809
                   " node %s" % (instance.name, src_node))
9810

    
9811
    # set the disks ID correctly since call_instance_start needs the
9812
    # correct drbd minor to create the symlinks
9813
    for disk in instance.disks:
9814
      self.cfg.SetDiskID(disk, src_node)
9815

    
9816
    activate_disks = (not instance.admin_up)
9817

    
9818
    if activate_disks:
9819
      # Activate the instance disks if we're exporting a stopped instance
9820
      feedback_fn("Activating disks for %s" % instance.name)
9821
      _StartInstanceDisks(self, instance, None)
9822

    
9823
    try:
9824
      helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
9825
                                                     instance)
9826

    
9827
      helper.CreateSnapshots()
9828
      try:
9829
        if (self.op.shutdown and instance.admin_up and
9830
            not self.op.remove_instance):
9831
          assert not activate_disks
9832
          feedback_fn("Starting instance %s" % instance.name)
9833
          result = self.rpc.call_instance_start(src_node, instance, None, None)
9834
          msg = result.fail_msg
9835
          if msg:
9836
            feedback_fn("Failed to start instance: %s" % msg)
9837
            _ShutdownInstanceDisks(self, instance)
9838
            raise errors.OpExecError("Could not start instance: %s" % msg)
9839

    
9840
        if self.op.mode == constants.EXPORT_MODE_LOCAL:
9841
          (fin_resu, dresults) = helper.LocalExport(self.dst_node)
9842
        elif self.op.mode == constants.EXPORT_MODE_REMOTE:
9843
          connect_timeout = constants.RIE_CONNECT_TIMEOUT
9844
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
9845

    
9846
          (key_name, _, _) = self.x509_key_name
9847

    
9848
          dest_ca_pem = \
9849
            OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
9850
                                            self.dest_x509_ca)
9851

    
9852
          (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
9853
                                                     key_name, dest_ca_pem,
9854
                                                     timeouts)
9855
      finally:
9856
        helper.Cleanup()
9857

    
9858
      # Check for backwards compatibility
9859
      assert len(dresults) == len(instance.disks)
9860
      assert compat.all(isinstance(i, bool) for i in dresults), \
9861
             "Not all results are boolean: %r" % dresults
9862

    
9863
    finally:
9864
      if activate_disks:
9865
        feedback_fn("Deactivating disks for %s" % instance.name)
9866
        _ShutdownInstanceDisks(self, instance)
9867

    
9868
    if not (compat.all(dresults) and fin_resu):
9869
      failures = []
9870
      if not fin_resu:
9871
        failures.append("export finalization")
9872
      if not compat.all(dresults):
9873
        fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
9874
                               if not dsk)
9875
        failures.append("disk export: disk(s) %s" % fdsk)
9876

    
9877
      raise errors.OpExecError("Export failed, errors in %s" %
9878
                               utils.CommaJoin(failures))
9879

    
9880
    # At this point, the export was successful, we can cleanup/finish
9881

    
9882
    # Remove instance if requested
9883
    if self.op.remove_instance:
9884
      feedback_fn("Removing instance %s" % instance.name)
9885
      _RemoveInstance(self, feedback_fn, instance,
9886
                      self.op.ignore_remove_failures)
9887

    
9888
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
9889
      self._CleanupExports(feedback_fn)
9890

    
9891
    return fin_resu, dresults
9892

    
9893

    
9894
class LURemoveExport(NoHooksLU):
9895
  """Remove exports related to the named instance.
9896

9897
  """
9898
  _OP_PARAMS = [
9899
    _PInstanceName,
9900
    ]
9901
  REQ_BGL = False
9902

    
9903
  def ExpandNames(self):
9904
    self.needed_locks = {}
9905
    # We need all nodes to be locked in order for RemoveExport to work, but we
9906
    # don't need to lock the instance itself, as nothing will happen to it (and
9907
    # we can remove exports also for a removed instance)
9908
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9909

    
9910
  def Exec(self, feedback_fn):
9911
    """Remove any export.
9912

9913
    """
9914
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
9915
    # If the instance was not found we'll try with the name that was passed in.
9916
    # This will only work if it was an FQDN, though.
9917
    fqdn_warn = False
9918
    if not instance_name:
9919
      fqdn_warn = True
9920
      instance_name = self.op.instance_name
9921

    
9922
    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
9923
    exportlist = self.rpc.call_export_list(locked_nodes)
9924
    found = False
9925
    for node in exportlist:
9926
      msg = exportlist[node].fail_msg
9927
      if msg:
9928
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
9929
        continue
9930
      if instance_name in exportlist[node].payload:
9931
        found = True
9932
        result = self.rpc.call_export_remove(node, instance_name)
9933
        msg = result.fail_msg
9934
        if msg:
9935
          logging.error("Could not remove export for instance %s"
9936
                        " on node %s: %s", instance_name, node, msg)
9937

    
9938
    if fqdn_warn and not found:
9939
      feedback_fn("Export not found. If trying to remove an export belonging"
9940
                  " to a deleted instance please use its Fully Qualified"
9941
                  " Domain Name.")
9942

    
9943

    
9944
class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
9945
  """Generic tags LU.
9946

9947
  This is an abstract class which is the parent of all the other tags LUs.
9948

9949
  """
9950

    
9951
  def ExpandNames(self):
9952
    self.needed_locks = {}
9953
    if self.op.kind == constants.TAG_NODE:
9954
      self.op.name = _ExpandNodeName(self.cfg, self.op.name)
9955
      self.needed_locks[locking.LEVEL_NODE] = self.op.name
9956
    elif self.op.kind == constants.TAG_INSTANCE:
9957
      self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
9958
      self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
9959

    
9960
    # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
9961
    # not possible to acquire the BGL based on opcode parameters)
9962

    
9963
  def CheckPrereq(self):
9964
    """Check prerequisites.
9965

9966
    """
9967
    if self.op.kind == constants.TAG_CLUSTER:
9968
      self.target = self.cfg.GetClusterInfo()
9969
    elif self.op.kind == constants.TAG_NODE:
9970
      self.target = self.cfg.GetNodeInfo(self.op.name)
9971
    elif self.op.kind == constants.TAG_INSTANCE:
9972
      self.target = self.cfg.GetInstanceInfo(self.op.name)
9973
    else:
9974
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
9975
                                 str(self.op.kind), errors.ECODE_INVAL)
9976

    
9977

    
9978
class LUGetTags(TagsLU):
9979
  """Returns the tags of a given object.
9980

9981
  """
9982
  _OP_PARAMS = [
9983
    ("kind", ht.NoDefault, ht.TElemOf(constants.VALID_TAG_TYPES)),
9984
    # Name is only meaningful for nodes and instances
9985
    ("name", ht.NoDefault, ht.TMaybeString),
9986
    ]
9987
  REQ_BGL = False
9988

    
9989
  def ExpandNames(self):
9990
    TagsLU.ExpandNames(self)
9991

    
9992
    # Share locks as this is only a read operation
9993
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
9994

    
9995
  def Exec(self, feedback_fn):
9996
    """Returns the tag list.
9997

9998
    """
9999
    return list(self.target.GetTags())
10000

    
10001

    
10002
class LUSearchTags(NoHooksLU):
10003
  """Searches the tags for a given pattern.
10004

10005
  """
10006
  _OP_PARAMS = [
10007
    ("pattern", ht.NoDefault, ht.TNonEmptyString),
10008
    ]
10009
  REQ_BGL = False
10010

    
10011
  def ExpandNames(self):
10012
    self.needed_locks = {}
10013

    
10014
  def CheckPrereq(self):
10015
    """Check prerequisites.
10016

10017
    This checks the pattern passed for validity by compiling it.
10018

10019
    """
10020
    try:
10021
      self.re = re.compile(self.op.pattern)
10022
    except re.error, err:
10023
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
10024
                                 (self.op.pattern, err), errors.ECODE_INVAL)
10025

    
10026
  def Exec(self, feedback_fn):
10027
    """Returns the tag list.
10028

10029
    """
10030
    cfg = self.cfg
10031
    tgts = [("/cluster", cfg.GetClusterInfo())]
10032
    ilist = cfg.GetAllInstancesInfo().values()
10033
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
10034
    nlist = cfg.GetAllNodesInfo().values()
10035
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
10036
    results = []
10037
    for path, target in tgts:
10038
      for tag in target.GetTags():
10039
        if self.re.search(tag):
10040
          results.append((path, tag))
10041
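    # Each entry is a (path, tag) pair, where path follows the /cluster,
    # /nodes/<name> and /instances/<name> scheme built above, e.g. (names
    # illustrative): ("/instances/inst1.example.com", "mytag")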
    return results
10042

    
10043

    
10044
class LUAddTags(TagsLU):
10045
  """Sets a tag on a given object.
10046

10047
  """
10048
  _OP_PARAMS = [
10049
    ("kind", ht.NoDefault, ht.TElemOf(constants.VALID_TAG_TYPES)),
10050
    # Name is only meaningful for nodes and instances
10051
    ("name", ht.NoDefault, ht.TMaybeString),
10052
    ("tags", ht.NoDefault, ht.TListOf(ht.TNonEmptyString)),
10053
    ]
10054
  REQ_BGL = False
10055

    
10056
  def CheckPrereq(self):
10057
    """Check prerequisites.
10058

10059
    This checks the type and length of the tag name and value.
10060

10061
    """
10062
    TagsLU.CheckPrereq(self)
10063
    for tag in self.op.tags:
10064
      objects.TaggableObject.ValidateTag(tag)
10065

    
10066
  def Exec(self, feedback_fn):
10067
    """Sets the tag.
10068

10069
    """
10070
    try:
10071
      for tag in self.op.tags:
10072
        self.target.AddTag(tag)
10073
    except errors.TagError, err:
10074
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
10075
    self.cfg.Update(self.target, feedback_fn)
10076

    
10077

    
10078
class LUDelTags(TagsLU):
10079
  """Delete a list of tags from a given object.
10080

10081
  """
10082
  _OP_PARAMS = [
10083
    ("kind", ht.NoDefault, ht.TElemOf(constants.VALID_TAG_TYPES)),
10084
    # Name is only meaningful for nodes and instances
10085
    ("name", ht.NoDefault, ht.TMaybeString),
10086
    ("tags", ht.NoDefault, ht.TListOf(ht.TNonEmptyString)),
10087
    ]
10088
  REQ_BGL = False
10089

    
10090
  def CheckPrereq(self):
10091
    """Check prerequisites.
10092

10093
    This checks that we have the given tag.
10094

10095
    """
10096
    TagsLU.CheckPrereq(self)
10097
    for tag in self.op.tags:
10098
      objects.TaggableObject.ValidateTag(tag)
10099
    del_tags = frozenset(self.op.tags)
10100
    cur_tags = self.target.GetTags()
10101

    
10102
    diff_tags = del_tags - cur_tags
10103
    if diff_tags:
10104
      diff_names = ("'%s'" % i for i in sorted(diff_tags))
10105
      raise errors.OpPrereqError("Tag(s) %s not found" %
10106
                                 (utils.CommaJoin(diff_names), ),
10107
                                 errors.ECODE_NOENT)
10108

    
10109
  def Exec(self, feedback_fn):
10110
    """Remove the tag from the object.
10111

10112
    """
10113
    for tag in self.op.tags:
10114
      self.target.RemoveTag(tag)
10115
    self.cfg.Update(self.target, feedback_fn)
10116

    
10117

    
10118
class LUTestDelay(NoHooksLU):
10119
  """Sleep for a specified amount of time.
10120

10121
  This LU sleeps on the master and/or nodes for a specified amount of
10122
  time.
10123

10124
  """
10125
  _OP_PARAMS = [
10126
    ("duration", ht.NoDefault, ht.TFloat),
10127
    ("on_master", True, ht.TBool),
10128
    ("on_nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
10129
    ("repeat", 0, ht.TPositiveInt)
10130
    ]
10131
  REQ_BGL = False
10132

    
10133
  def ExpandNames(self):
10134
    """Expand names and set required locks.
10135

10136
    This expands the node list, if any.
10137

10138
    """
10139
    self.needed_locks = {}
10140
    if self.op.on_nodes:
10141
      # _GetWantedNodes can be used here, but is not always appropriate to use
10142
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
10143
      # more information.
10144
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
10145
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
10146

    
10147
  def _TestDelay(self):
10148
    """Do the actual sleep.
10149

10150
    """
10151
    if self.op.on_master:
10152
      if not utils.TestDelay(self.op.duration):
10153
        raise errors.OpExecError("Error during master delay test")
10154
    if self.op.on_nodes:
10155
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
10156
      for node, node_result in result.items():
10157
        node_result.Raise("Failure during rpc call to node %s" % node)
10158

    
10159
  def Exec(self, feedback_fn):
10160
    """Execute the test delay opcode, with the wanted repetitions.
10161

10162
    """
10163
    if self.op.repeat == 0:
10164
      self._TestDelay()
10165
    else:
10166
      top_value = self.op.repeat - 1
10167
      for i in range(self.op.repeat):
10168
        self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
10169
        self._TestDelay()
10170

    
10171

    
10172
class LUTestJobqueue(NoHooksLU):
10173
  """Utility LU to test some aspects of the job queue.
10174

10175
  """
10176
  _OP_PARAMS = [
10177
    ("notify_waitlock", False, ht.TBool),
10178
    ("notify_exec", False, ht.TBool),
10179
    ("log_messages", ht.EmptyList, ht.TListOf(ht.TString)),
10180
    ("fail", False, ht.TBool),
10181
    ]
10182
  REQ_BGL = False
10183

    
10184
  # Must be lower than default timeout for WaitForJobChange to see whether it
10185
  # notices changed jobs
10186
  _CLIENT_CONNECT_TIMEOUT = 20.0
10187
  _CLIENT_CONFIRM_TIMEOUT = 60.0
10188

    
10189
  @classmethod
10190
  def _NotifyUsingSocket(cls, cb, errcls):
10191
    """Opens a Unix socket and waits for another program to connect.
10192

10193
    @type cb: callable
10194
    @param cb: Callback to send socket name to client
10195
    @type errcls: class
10196
    @param errcls: Exception class to use for errors
10197

10198
    """
10199
    # Using a temporary directory as there's no easy way to create temporary
10200
    # sockets without writing a custom loop around tempfile.mktemp and
10201
    # socket.bind
10202
    tmpdir = tempfile.mkdtemp()
10203
    try:
10204
      tmpsock = utils.PathJoin(tmpdir, "sock")
10205

    
10206
      logging.debug("Creating temporary socket at %s", tmpsock)
10207
      sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
10208
      try:
10209
        sock.bind(tmpsock)
10210
        sock.listen(1)
10211

    
10212
        # Send details to client
10213
        cb(tmpsock)
10214

    
10215
        # Wait for client to connect before continuing
10216
        sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
10217
        try:
10218
          (conn, _) = sock.accept()
10219
        except socket.error, err:
10220
          raise errcls("Client didn't connect in time (%s)" % err)
10221
      finally:
10222
        sock.close()
10223
    finally:
10224
      # Remove as soon as client is connected
10225
      shutil.rmtree(tmpdir)
10226

    
10227
    # Wait for client to close
10228
    try:
10229
      try:
10230
        # pylint: disable-msg=E1101
10231
        # Instance of '_socketobject' has no ... member
10232
        conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
10233
        conn.recv(1)
10234
      except socket.error, err:
10235
        raise errcls("Client failed to confirm notification (%s)" % err)
10236
    finally:
10237
      conn.close()
10238

    
10239
  def _SendNotification(self, test, arg, sockname):
10240
    """Sends a notification to the client.
10241

10242
    @type test: string
10243
    @param test: Test name
10244
    @param arg: Test argument (depends on test)
10245
    @type sockname: string
10246
    @param sockname: Socket path
10247

10248
    """
10249
    self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))
10250

    
10251
  def _Notify(self, prereq, test, arg):
10252
    """Notifies the client of a test.
10253

10254
    @type prereq: bool
10255
    @param prereq: Whether this is a prereq-phase test
10256
    @type test: string
10257
    @param test: Test name
10258
    @param arg: Test argument (depends on test)
10259

10260
    """
10261
    if prereq:
10262
      errcls = errors.OpPrereqError
10263
    else:
10264
      errcls = errors.OpExecError
10265

    
10266
    return self._NotifyUsingSocket(compat.partial(self._SendNotification,
10267
                                                  test, arg),
10268
                                   errcls)
10269

    
10270
  def CheckArguments(self):
10271
    self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
10272
    self.expandnames_calls = 0
10273

    
10274
  def ExpandNames(self):
10275
    checkargs_calls = getattr(self, "checkargs_calls", 0)
10276
    if checkargs_calls < 1:
10277
      raise errors.ProgrammerError("CheckArguments was not called")
10278

    
10279
    self.expandnames_calls += 1
10280

    
10281
    if self.op.notify_waitlock:
10282
      self._Notify(True, constants.JQT_EXPANDNAMES, None)
10283

    
10284
    self.LogInfo("Expanding names")
10285

    
10286
    # Get lock on master node (just to get a lock, not for a particular reason)
10287
    self.needed_locks = {
10288
      locking.LEVEL_NODE: self.cfg.GetMasterNode(),
10289
      }
10290

    
10291
  def Exec(self, feedback_fn):
10292
    if self.expandnames_calls < 1:
10293
      raise errors.ProgrammerError("ExpandNames was not called")
10294

    
10295
    if self.op.notify_exec:
10296
      self._Notify(False, constants.JQT_EXEC, None)
10297

    
10298
    self.LogInfo("Executing")
10299

    
10300
    if self.op.log_messages:
10301
      self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
10302
      for idx, msg in enumerate(self.op.log_messages):
10303
        self.LogInfo("Sending log message %s", idx + 1)
10304
        feedback_fn(constants.JQT_MSGPREFIX + msg)
10305
        # Report how many test messages have been sent
10306
        self._Notify(False, constants.JQT_LOGMSG, idx + 1)
10307

    
10308
    if self.op.fail:
10309
      raise errors.OpExecError("Opcode failure was requested")
10310

    
10311
    return True
10312

    
10313

    
10314
class IAllocator(object):
10315
  """IAllocator framework.
10316

10317
  An IAllocator instance has four sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, result) for
      easy usage
10325

10326
  """
10327
  # pylint: disable-msg=R0902
10328
  # lots of instance attributes
10329
  _ALLO_KEYS = [
10330
    "name", "mem_size", "disks", "disk_template",
10331
    "os", "tags", "nics", "vcpus", "hypervisor",
10332
    ]
10333
  _RELO_KEYS = [
10334
    "name", "relocate_from",
10335
    ]
10336
  _EVAC_KEYS = [
10337
    "evac_nodes",
10338
    ]
10339

    
10340
  def __init__(self, cfg, rpc, mode, **kwargs):
10341
    self.cfg = cfg
10342
    self.rpc = rpc
10343
    # init buffer variables
10344
    self.in_text = self.out_text = self.in_data = self.out_data = None
10345
    # init all input fields so that pylint is happy
10346
    self.mode = mode
10347
    self.mem_size = self.disks = self.disk_template = None
10348
    self.os = self.tags = self.nics = self.vcpus = None
10349
    self.hypervisor = None
10350
    self.relocate_from = None
10351
    self.name = None
10352
    self.evac_nodes = None
10353
    # computed fields
10354
    self.required_nodes = None
10355
    # init result fields
10356
    self.success = self.info = self.result = None
10357
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
10358
      keyset = self._ALLO_KEYS
10359
      fn = self._AddNewInstance
10360
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
10361
      keyset = self._RELO_KEYS
10362
      fn = self._AddRelocateInstance
10363
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
10364
      keyset = self._EVAC_KEYS
10365
      fn = self._AddEvacuateNodes
10366
    else:
10367
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
10368
                                   " IAllocator" % self.mode)
10369
    for key in kwargs:
10370
      if key not in keyset:
10371
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
10372
                                     " IAllocator" % key)
10373
      setattr(self, key, kwargs[key])
10374

    
10375
    for key in keyset:
10376
      if key not in kwargs:
10377
        raise errors.ProgrammerError("Missing input parameter '%s' to"
10378
                                     " IAllocator" % key)
10379
    self._BuildInputData(fn)
10380

    
10381
  def _ComputeClusterData(self):
10382
    """Compute the generic allocator input data.
10383

10384
    This is the data that is independent of the actual operation.
10385

10386
    """
10387
    cfg = self.cfg
10388
    cluster_info = cfg.GetClusterInfo()
10389
    # cluster data
10390
    data = {
10391
      "version": constants.IALLOCATOR_VERSION,
10392
      "cluster_name": cfg.GetClusterName(),
10393
      "cluster_tags": list(cluster_info.GetTags()),
10394
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
10395
      # we don't have job IDs
10396
      }
10397
    iinfo = cfg.GetAllInstancesInfo().values()
10398
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
10399

    
10400
    # node data
10401
    node_list = cfg.GetNodeList()
10402

    
10403
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
10404
      hypervisor_name = self.hypervisor
10405
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
10406
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
10407
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
10408
      hypervisor_name = cluster_info.enabled_hypervisors[0]
10409

    
10410
    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
10411
                                        hypervisor_name)
10412
    node_iinfo = \
10413
      self.rpc.call_all_instances_info(node_list,
10414
                                       cluster_info.enabled_hypervisors)
10415

    
10416
    data["nodegroups"] = self._ComputeNodeGroupData(cfg)
10417

    
10418
    data["nodes"] = self._ComputeNodeData(cfg, node_data, node_iinfo, i_list)
10419

    
10420
    data["instances"] = self._ComputeInstanceData(cluster_info, i_list)
10421

    
10422
    self.in_data = data
10423

    
10424
  @staticmethod
10425
  def _ComputeNodeGroupData(cfg):
10426
    """Compute node groups data.
10427

10428
    """
10429
    ng = {}
10430
    for guuid, gdata in cfg.GetAllNodeGroupsInfo().items():
10431
      ng[guuid] = { "name": gdata.name }
10432
    return ng
10433

    
10434
  @staticmethod
10435
  def _ComputeNodeData(cfg, node_data, node_iinfo, i_list):
10436
    """Compute global node data.
10437

10438
    """
10439
    node_results = {}
10440
    for nname, nresult in node_data.items():
10441
      # first fill in static (config-based) values
10442
      ninfo = cfg.GetNodeInfo(nname)
10443
      pnr = {
10444
        "tags": list(ninfo.GetTags()),
10445
        "primary_ip": ninfo.primary_ip,
10446
        "secondary_ip": ninfo.secondary_ip,
10447
        "offline": ninfo.offline,
10448
        "drained": ninfo.drained,
10449
        "master_candidate": ninfo.master_candidate,
10450
        "group": ninfo.group,
10451
        "master_capable": ninfo.master_capable,
10452
        "vm_capable": ninfo.vm_capable,
10453
        }
10454

    
10455
      if not (ninfo.offline or ninfo.drained):
10456
        nresult.Raise("Can't get data for node %s" % nname)
10457
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
10458
                                nname)
10459
        remote_info = nresult.payload
10460

    
10461
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
10462
                     'vg_size', 'vg_free', 'cpu_total']:
10463
          if attr not in remote_info:
10464
            raise errors.OpExecError("Node '%s' didn't return attribute"
10465
                                     " '%s'" % (nname, attr))
10466
          if not isinstance(remote_info[attr], int):
10467
            raise errors.OpExecError("Node '%s' returned invalid value"
10468
                                     " for '%s': %s" %
10469
                                     (nname, attr, remote_info[attr]))
10470
        # compute memory used by primary instances
10471
        i_p_mem = i_p_up_mem = 0
10472
        for iinfo, beinfo in i_list:
10473
          if iinfo.primary_node == nname:
10474
            i_p_mem += beinfo[constants.BE_MEMORY]
10475
            if iinfo.name not in node_iinfo[nname].payload:
10476
              i_used_mem = 0
10477
            else:
10478
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
10479
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
10480
            remote_info['memory_free'] -= max(0, i_mem_diff)
10481

    
10482
            if iinfo.admin_up:
10483
              i_p_up_mem += beinfo[constants.BE_MEMORY]
10484

    
10485
        # compute memory used by instances
10486
        pnr_dyn = {
10487
          "total_memory": remote_info['memory_total'],
10488
          "reserved_memory": remote_info['memory_dom0'],
10489
          "free_memory": remote_info['memory_free'],
10490
          "total_disk": remote_info['vg_size'],
10491
          "free_disk": remote_info['vg_free'],
10492
          "total_cpus": remote_info['cpu_total'],
10493
          "i_pri_memory": i_p_mem,
10494
          "i_pri_up_memory": i_p_up_mem,
10495
          }
10496
        pnr.update(pnr_dyn)
10497

    
10498
      node_results[nname] = pnr
10499

    
10500
    return node_results
10501

    
10502
  @staticmethod
10503
  def _ComputeInstanceData(cluster_info, i_list):
10504
    """Compute global instance data.
10505

10506
    """
10507
    instance_data = {}
10508
    for iinfo, beinfo in i_list:
10509
      nic_data = []
10510
      for nic in iinfo.nics:
10511
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
10512
        nic_dict = {"mac": nic.mac,
10513
                    "ip": nic.ip,
10514
                    "mode": filled_params[constants.NIC_MODE],
10515
                    "link": filled_params[constants.NIC_LINK],
10516
                   }
10517
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
10518
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
10519
        nic_data.append(nic_dict)
10520
      pir = {
10521
        "tags": list(iinfo.GetTags()),
10522
        "admin_up": iinfo.admin_up,
10523
        "vcpus": beinfo[constants.BE_VCPUS],
10524
        "memory": beinfo[constants.BE_MEMORY],
10525
        "os": iinfo.os,
10526
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
10527
        "nics": nic_data,
10528
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
10529
        "disk_template": iinfo.disk_template,
10530
        "hypervisor": iinfo.hypervisor,
10531
        }
10532
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
10533
                                                 pir["disks"])
10534
      instance_data[iinfo.name] = pir
10535

    
10536
    return instance_data
10537

    
10538
  def _AddNewInstance(self):
10539
    """Add new instance data to allocator structure.
10540

10541
    This in combination with _AllocatorGetClusterData will create the
10542
    correct structure needed as input for the allocator.
10543

10544
    The checks for the completeness of the opcode must have already been
10545
    done.
10546

10547
    """
10548
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)
10549

    
10550
    if self.disk_template in constants.DTS_NET_MIRROR:
10551
      self.required_nodes = 2
10552
    else:
10553
      self.required_nodes = 1
10554
    request = {
10555
      "name": self.name,
10556
      "disk_template": self.disk_template,
10557
      "tags": self.tags,
10558
      "os": self.os,
10559
      "vcpus": self.vcpus,
10560
      "memory": self.mem_size,
10561
      "disks": self.disks,
10562
      "disk_space_total": disk_space,
10563
      "nics": self.nics,
10564
      "required_nodes": self.required_nodes,
10565
      }
10566
    return request
10567

    
10568
  def _AddRelocateInstance(self):
10569
    """Add relocate instance data to allocator structure.
10570

10571
    This in combination with _IAllocatorGetClusterData will create the
10572
    correct structure needed as input for the allocator.
10573

10574
    The checks for the completeness of the opcode must have already been
10575
    done.
10576

10577
    """
10578
    instance = self.cfg.GetInstanceInfo(self.name)
10579
    if instance is None:
10580
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
10581
                                   " IAllocator" % self.name)
10582

    
10583
    if instance.disk_template not in constants.DTS_NET_MIRROR:
10584
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
10585
                                 errors.ECODE_INVAL)
10586

    
10587
    if len(instance.secondary_nodes) != 1:
10588
      raise errors.OpPrereqError("Instance has not exactly one secondary node",
10589
                                 errors.ECODE_STATE)
10590

    
10591
    self.required_nodes = 1
10592
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
10593
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
10594

    
10595
    request = {
10596
      "name": self.name,
10597
      "disk_space_total": disk_space,
10598
      "required_nodes": self.required_nodes,
10599
      "relocate_from": self.relocate_from,
10600
      }
10601
    return request
10602

    
10603
  def _AddEvacuateNodes(self):
10604
    """Add evacuate nodes data to allocator structure.
10605

10606
    """
10607
    request = {
10608
      "evac_nodes": self.evac_nodes
10609
      }
10610
    return request
10611

    
10612
  def _BuildInputData(self, fn):
10613
    """Build input data structures.
10614

10615
    """
10616
    self._ComputeClusterData()
10617

    
10618
    request = fn()
10619
    request["type"] = self.mode
10620
    self.in_data["request"] = request
10621

    
10622
    self.in_text = serializer.Dump(self.in_data)
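    # At this point self.in_data (and its serialized form self.in_text) is the
    # complete iallocator input; judging from _ComputeClusterData above it
    # carries at least the keys "version", "cluster_name", "cluster_tags",
    # "enabled_hypervisors", "nodegroups", "nodes", "instances" and the
    # per-mode "request" added here (sketch, not an exhaustive list).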
10623

    
10624
  def Run(self, name, validate=True, call_fn=None):
10625
    """Run an instance allocator and return the results.
10626

10627
    """
10628
    if call_fn is None:
10629
      call_fn = self.rpc.call_iallocator_runner
10630

    
10631
    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
10632
    result.Raise("Failure while running the iallocator script")
10633

    
10634
    self.out_text = result.payload
10635
    if validate:
10636
      self._ValidateResult()
10637

    
10638
  def _ValidateResult(self):
10639
    """Process the allocator results.
10640

10641
    This will process and if successful save the result in
10642
    self.out_data and the other parameters.
10643

10644
    """
10645
    try:
10646
      rdict = serializer.Load(self.out_text)
10647
    except Exception, err:
10648
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
10649

    
10650
    if not isinstance(rdict, dict):
10651
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
10652

    
10653
    # TODO: remove backwards compatibility in later versions
10654
    if "nodes" in rdict and "result" not in rdict:
10655
      rdict["result"] = rdict["nodes"]
10656
      del rdict["nodes"]
10657

    
10658
    for key in "success", "info", "result":
10659
      if key not in rdict:
10660
        raise errors.OpExecError("Can't parse iallocator results:"
10661
                                 " missing key '%s'" % key)
10662
      setattr(self, key, rdict[key])
10663

    
10664
    if not isinstance(rdict["result"], list):
10665
      raise errors.OpExecError("Can't parse iallocator results: 'result' key"
10666
                               " is not a list")
10667
    self.out_data = rdict
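    # A minimal example of a reply accepted by the checks above (contents are
    # illustrative only; what "result" holds depends on the request mode):
    #   {"success": true, "info": "allocation successful",
    #    "result": ["node1.example.com", "node2.example.com"]}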
10668

    
10669

    
10670
class LUTestAllocator(NoHooksLU):
10671
  """Run allocator tests.
10672

10673
  This LU runs the allocator tests
10674

10675
  """
10676
  _OP_PARAMS = [
10677
    ("direction", ht.NoDefault,
10678
     ht.TElemOf(constants.VALID_IALLOCATOR_DIRECTIONS)),
10679
    ("mode", ht.NoDefault, ht.TElemOf(constants.VALID_IALLOCATOR_MODES)),
10680
    ("name", ht.NoDefault, ht.TNonEmptyString),
10681
    ("nics", ht.NoDefault, ht.TOr(ht.TNone, ht.TListOf(
10682
      ht.TDictOf(ht.TElemOf(["mac", "ip", "bridge"]),
10683
               ht.TOr(ht.TNone, ht.TNonEmptyString))))),
10684
    ("disks", ht.NoDefault, ht.TOr(ht.TNone, ht.TList)),
10685
    ("hypervisor", None, ht.TMaybeString),
10686
    ("allocator", None, ht.TMaybeString),
10687
    ("tags", ht.EmptyList, ht.TListOf(ht.TNonEmptyString)),
10688
    ("mem_size", None, ht.TOr(ht.TNone, ht.TPositiveInt)),
10689
    ("vcpus", None, ht.TOr(ht.TNone, ht.TPositiveInt)),
10690
    ("os", None, ht.TMaybeString),
10691
    ("disk_template", None, ht.TMaybeString),
10692
    ("evac_nodes", None, ht.TOr(ht.TNone, ht.TListOf(ht.TNonEmptyString))),
10693
    ]
10694

    
10695
  def CheckPrereq(self):
10696
    """Check prerequisites.
10697

10698
    This checks the opcode parameters depending on the direction and mode.
10699

10700
    """
10701
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
10702
      for attr in ["mem_size", "disks", "disk_template",
10703
                   "os", "tags", "nics", "vcpus"]:
10704
        if not hasattr(self.op, attr):
10705
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
10706
                                     attr, errors.ECODE_INVAL)
10707
      iname = self.cfg.ExpandInstanceName(self.op.name)
10708
      if iname is not None:
10709
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
10710
                                   iname, errors.ECODE_EXISTS)
10711
      if not isinstance(self.op.nics, list):
10712
        raise errors.OpPrereqError("Invalid parameter 'nics'",
10713
                                   errors.ECODE_INVAL)
10714
      if not isinstance(self.op.disks, list):
10715
        raise errors.OpPrereqError("Invalid parameter 'disks'",
10716
                                   errors.ECODE_INVAL)
10717
      for row in self.op.disks:
10718
        if (not isinstance(row, dict) or
10719
            "size" not in row or
10720
            not isinstance(row["size"], int) or
10721
            "mode" not in row or
10722
            row["mode"] not in ['r', 'w']):
10723
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
10724
                                     " parameter", errors.ECODE_INVAL)
10725
      if self.op.hypervisor is None:
10726
        self.op.hypervisor = self.cfg.GetHypervisorType()
10727
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
10728
      fname = _ExpandInstanceName(self.cfg, self.op.name)
10729
      self.op.name = fname
10730
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
10731
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
10732
      if not hasattr(self.op, "evac_nodes"):
10733
        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
10734
                                   " opcode input", errors.ECODE_INVAL)
10735
    else:
10736
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
10737
                                 self.op.mode, errors.ECODE_INVAL)
10738

    
10739
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
10740
      if self.op.allocator is None:
10741
        raise errors.OpPrereqError("Missing allocator name",
10742
                                   errors.ECODE_INVAL)
10743
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
10744
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
10745
                                 self.op.direction, errors.ECODE_INVAL)
10746

    
10747
  def Exec(self, feedback_fn):
10748
    """Run the allocator test.
10749

10750
    """
10751
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
10752
      ial = IAllocator(self.cfg, self.rpc,
10753
                       mode=self.op.mode,
10754
                       name=self.op.name,
10755
                       mem_size=self.op.mem_size,
10756
                       disks=self.op.disks,
10757
                       disk_template=self.op.disk_template,
10758
                       os=self.op.os,
10759
                       tags=self.op.tags,
10760
                       nics=self.op.nics,
10761
                       vcpus=self.op.vcpus,
10762
                       hypervisor=self.op.hypervisor,
10763
                       )
10764
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
10765
      ial = IAllocator(self.cfg, self.rpc,
10766
                       mode=self.op.mode,
10767
                       name=self.op.name,
10768
                       relocate_from=list(self.relocate_from),
10769
                       )
10770
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
10771
      ial = IAllocator(self.cfg, self.rpc,
10772
                       mode=self.op.mode,
10773
                       evac_nodes=self.op.evac_nodes)
10774
    else:
10775
      raise errors.ProgrammerError("Unhandled mode %s in"
10776
                                   " LUTestAllocator.Exec", self.op.mode)
10777

    
10778
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
10779
      result = ial.in_text
10780
    else:
10781
      ial.Run(self.op.allocator, validate=False)
10782
      result = ial.out_text
10783
    return result