#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import time
import tempfile
import re
import platform
import logging
import copy
import random

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import serializer
from ganeti import ssconf

class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

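  # Illustrative sketch (an assumption about the usual processor behaviour,
  # not code from this module) of the order in which mcpu drives an LU:
  #
  #   lu = SomeLU(proc, op, context, rpc)  # __init__ runs CheckArguments
  #   lu.ExpandNames()                     # canonicalize names, declare locks
  #   # for each level, DeclareLocks(level) is called before acquiring
  #   # the locks at that level
  #   lu.CheckPrereq()                     # cluster-state checks
  #   lu.Exec(feedback_fn)                 # the actual work, with pre/post
  #                                        # hooks around it if HPATH is set
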
  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods need no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level, omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level, use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level.

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones at the levels before. This function is called just before
    acquiring locks at a particular level, but after acquiring the ones at
    lower levels, and permits such calculations. It can be used to modify
    self.needed_locks, and by default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not be prefixed with 'GANETI_', as this
    will be handled in the hooks runner. Also note that additional keys
    will be added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If there are no nodes, an empty list (and not None) should be returned.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks. By default the method does nothing and the
    previous result is passed back unchanged, but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

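  # Sketch of a possible HooksCallBack override (hypothetical, for
  # illustration): degrade the LU result if the post-phase hooks failed;
  # LUVerifyCluster.HooksCallBack below does this in full.
  #
  #   def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
  #     if phase == constants.HOOKS_PHASE_POST and not hook_results:
  #       feedback_fn("  - ERROR: general communication failure")
  #       lu_result = 1
  #     return lu_result
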
  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primary or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]

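# Putting the helpers together, a minimal concurrent LU could look like the
# sketch below (hypothetical opcode and class, for illustration only):
#
#   class LUExampleInstanceOp(LogicalUnit):
#     _OP_REQP = ["instance_name"]
#     REQ_BGL = False
#
#     def ExpandNames(self):
#       self._ExpandAndLockInstance()
#       self.needed_locks[locking.LEVEL_NODE] = []
#       self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
#
#     def DeclareLocks(self, level):
#       if level == locking.LEVEL_NODE:
#         self._LockInstancesNodes()

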
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpPrereqError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set
  @type selected: list
  @param selected: the list of fields to check

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta))


def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)))
  setattr(op, name, val)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node)


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks.

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, bridge, mac) representing
      the NICs the instance has
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge, mac) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env

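# For illustration, a hypothetical single-NIC instance (all values invented)
# would produce an environment like:
#
#   _BuildInstanceHookEnv("inst1", "node1", ["node2"], "debian-etch", True,
#                         128, 1,
#                         [("1.2.3.4", "xen-br0", "aa:00:00:00:00:01")])
#   => {"OP_TARGET": "inst1", "INSTANCE_NAME": "inst1",
#       "INSTANCE_PRIMARY": "node1", "INSTANCE_SECONDARIES": "node2",
#       "INSTANCE_OS_TYPE": "debian-etch", "INSTANCE_STATUS": "up",
#       "INSTANCE_MEMORY": 128, "INSTANCE_VCPUS": 1,
#       "INSTANCE_NIC0_IP": "1.2.3.4", "INSTANCE_NIC0_BRIDGE": "xen-br0",
#       "INSTANCE_NIC0_HWADDR": "aa:00:00:00:00:01", "INSTANCE_NIC_COUNT": 1}

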
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


def _AdjustCandidatePool(lu):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool()
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               ", ".join(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _CheckInstanceBridgesExist(lu, instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
  result.Raise()
  if not result.data:
    raise errors.OpPrereqError("One or more target bridges %s do not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise()
    if not result.data:
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files,
                  drbd_map):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have
    @param drbd_map: the used drbd minors for this node, in
        form of minor: (instance, must_exist) which correspond to instances
        and their running status

    """
    node = nodeinfo.name

    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = node_result.get('version', None)
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
            len(remote_version) == 2):
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version[0]:
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
                  " node %s %s" % (local_version, node, remote_version[0]))
      return True

    # node seems compatible, we can actually try to look into its results

    bad = False

    # full package version
    if constants.RELEASE_VERSION != remote_version[1]:
      feedback_fn("  - WARNING: software version mismatch: master %s,"
                  " node %s %s" %
                  (constants.RELEASE_VERSION, node, remote_version[1]))

    # checks vg existence and size > 20G

    vglist = node_result.get(constants.NV_VGLIST, None)
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum

    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      for file_name in file_list:
        node_is_mc = nodeinfo.master_candidate
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
          else:
            # not candidate and this is not a must-have file
            bad = True
            feedback_fn("  - ERROR: non master-candidate has old/wrong file"
                        " '%s'" % file_name)
        else:
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)

    # checks ssh to any

    if constants.NV_NODELIST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result[constants.NV_NODELIST]:
        bad = True
        for node in node_result[constants.NV_NODELIST]:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODELIST][node]))

    if constants.NV_NODENETTEST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result[constants.NV_NODENETTEST]:
        bad = True
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODENETTEST][node]))

    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))

    # check used drbd list
    used_minors = node_result.get(constants.NV_DRBDLIST, [])
    for minor, (iname, must_exist) in drbd_map.items():
      if minor not in used_minors and must_exist:
        feedback_fn("  - ERROR: drbd minor %d of instance %s is not active" %
                    (minor, iname))
        bad = True
    for minor in used_minors:
      if minor not in drbd_map:
        feedback_fn("  - ERROR: unallocated drbd minor %d is in use" % minor)
        bad = True

    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn, n_offline):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      if node in n_offline:
        # ignore missing volumes on offline nodes
        continue
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if instanceconfig.admin_up:
      if ((node_current not in node_instance or
          not instance in node_instance[node_current]) and
          node_current not in n_offline):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to take over,
      # should a single other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad

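  # Worked example (invented numbers): if node A is secondary for instances
  # whose primary is node B, and those auto-balanced instances need
  # 2048MB in total while node A has mfree=1024MB, then node B's failure
  # could not be absorbed and the loop above flags node A as not N+1 safe.
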
  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks are run only in the post phase, and their failure
    makes the output be logged in the verify output and the verification
    fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
                        for iname in instancelist)
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    n_offline = [] # List of offline nodes
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    master_files = [constants.CLUSTER_CONF_FILE]

    file_names = ssconf.SimpleStore().GetFileList()
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.RAPI_CERT_FILE)
    file_names.extend(master_files)

    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    node_verify_param = {
      constants.NV_FILELIST: file_names,
      constants.NV_NODELIST: [node.name for node in nodeinfo
                              if not node.offline],
      constants.NV_HYPERVISOR: hypervisors,
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
                                  node.secondary_ip) for node in nodeinfo
                                 if not node.offline],
      constants.NV_LVLIST: vg_name,
      constants.NV_INSTANCELIST: hypervisors,
      constants.NV_VGLIST: None,
      constants.NV_VERSION: None,
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
      constants.NV_DRBDLIST: None,
      }
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())

    cluster = self.cfg.GetClusterInfo()
    master_node = self.cfg.GetMasterNode()
    all_drbd_map = self.cfg.ComputeDRBDMap()

    for node_i in nodeinfo:
      node = node_i.name
      nresult = all_nvinfo[node].data

      if node_i.offline:
        feedback_fn("* Skipping offline node %s" % (node,))
        n_offline.append(node)
        continue

      if node == master_node:
        ntype = "master"
      elif node_i.master_candidate:
        ntype = "master candidate"
      else:
        ntype = "regular"
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))

      if all_nvinfo[node].failed or not isinstance(nresult, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_drbd = {}
      for minor, instance in all_drbd_map[node].items():
        instance = instanceinfo[instance]
        node_drbd[minor] = (instance.name, instance.admin_up)
      result = self._VerifyNode(node_i, file_names, local_checksums,
                                nresult, feedback_fn, master_files,
                                node_drbd)
      bad = bad or result

      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
      if isinstance(lvdata, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, utils.SafeEncode(lvdata)))
        bad = True
        node_volume[node] = {}
      elif not isinstance(lvdata, dict):
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = lvdata

      # node_instance
      idata = nresult.get(constants.NV_INSTANCELIST, None)
      if not isinstance(idata, list):
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
                    (node,))
        bad = True
        continue

      node_instance[node] = idata

      # node_info
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nresult[constants.NV_VGLIST][vg_name]),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary. This is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = instanceinfo[instance]
      result = self._VerifyInstance(instance, inst_config, node_volume,
                                    node_instance, feedback_fn, n_offline)
      bad = bad or result
      inst_nodes_offline = []

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      elif pnode not in n_offline:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      if pnode in n_offline:
        inst_nodes_offline.append(pnode)

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        elif snode not in n_offline:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))
          bad = True
        if snode in n_offline:
          inst_nodes_offline.append(snode)

      if inst_nodes_offline:
        # warn that the instance lives on offline nodes, and set bad=True
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
                    ", ".join(inst_nodes_offline))
        bad = True

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    if n_offline:
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))

    return not bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result.

    This method analyzes the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res.failed or res.data is False or not isinstance(res.data, list):
            if res.offline:
              # no need to warn or set fail return value
              continue
            feedback_fn("    Communication failure in hooks execution")
            lu_result = 1
            continue
          for script, hkr, output in res.data:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    to_act = set()
    for node in nodes:
      # node_volume
      lvs = node_lvs[node]
      if lvs.failed:
        if not lvs.offline:
          self.LogWarning("Connection to node %s failed: %s" %
                          (node, lvs.data))
        continue
      lvs = lvs.data
      if isinstance(lvs, basestring):
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
        res_nlvm[node] = lvs
      elif not isinstance(lvs, dict):
        logging.warning("Connection to node %s failed or invalid data"
                        " returned", node)
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result

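# The Exec result above is the tuple (res_nodes, res_nlvm, res_instances,
# res_missing); a hypothetical return value (invented data) could be:
#
#   (["node3"],                                  # unreachable/bad-data nodes
#    {"node2": "volume group xenvg missing"},    # per-node LVM error strings
#    ["instance1"],                              # instances with inactive LVs
#    {"instance2": [("node1", "xenvg/disk0")]})  # missing (node, LV) pairs

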
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    if result.failed or not result.data:
      raise errors.OpExecError("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed",
                        constants.SSH_KNOWN_HOSTS_FILE, to_node)

    finally:
      result = self.rpc.call_node_start_master(master, False)
      if result.failed or not result.data:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually.")


def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether a LD_LV dev_type was found or not

  """
  if disk.children:
    for chdisk in disk.children:
      if _RecursiveCheckIfLVMBased(chdisk):
        return True
  return disk.dev_type == constants.LD_LV

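# For example (hypothetical disk tree): a mirrored disk whose children are
# two LD_LV logical volumes makes _RecursiveCheckIfLVMBased return True,
# while a disk without children and a non-LD_LV dev_type returns False.

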
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def CheckParameters(self):
    """Check parameters.

    """
    if not hasattr(self.op, "candidate_pool_size"):
      self.op.candidate_pool_size = None
    if self.op.candidate_pool_size is not None:
      try:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                   str(err))
      if self.op.candidate_pool_size < 1:
        raise errors.OpPrereqError("At least one master candidate needed")

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    # FIXME: This only works because there is only one parameter that can be
    # changed or removed.
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        if vglist[node].failed:
          # ignoring down node
          self.LogWarning("Node %s unreachable/error, ignoring" % node)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate beparams changes
    if self.op.beparams:
      utils.CheckBEParams(self.op.beparams)
      self.new_beparams = cluster.FillDict(
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)

    # hypervisor list/parameters
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

1454
  def Exec(self, feedback_fn):
1455
    """Change the parameters of the cluster.
1456

1457
    """
1458
    if self.op.vg_name is not None:
1459
      if self.op.vg_name != self.cfg.GetVGName():
1460
        self.cfg.SetVGName(self.op.vg_name)
1461
      else:
1462
        feedback_fn("Cluster LVM configuration already in desired"
1463
                    " state, not changing")
1464
    if self.op.hvparams:
1465
      self.cluster.hvparams = self.new_hvparams
1466
    if self.op.enabled_hypervisors is not None:
1467
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1468
    if self.op.beparams:
1469
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
1470
    if self.op.candidate_pool_size is not None:
1471
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
1472

    
1473
    self.cfg.Update(self.cluster)
1474

    
1475
    # we want to update nodes after the cluster so that if any errors
1476
    # happen, we have recorded and saved the cluster info
1477
    if self.op.candidate_pool_size is not None:
1478
      _AdjustCandidatePool(self)
1479

    
1480

    
1481
class LURedistributeConfig(NoHooksLU):
1482
  """Force the redistribution of cluster configuration.
1483

1484
  This is a very simple LU.
1485

1486
  """
1487
  _OP_REQP = []
1488
  REQ_BGL = False
1489

    
1490
  def ExpandNames(self):
1491
    self.needed_locks = {
1492
      locking.LEVEL_NODE: locking.ALL_SET,
1493
    }
1494
    self.share_locks[locking.LEVEL_NODE] = 1
1495

    
1496
  def CheckPrereq(self):
1497
    """Check prerequisites.
1498

1499
    """
1500

    
1501
  def Exec(self, feedback_fn):
1502
    """Redistribute the configuration.
1503

1504
    """
1505
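    # re-saving the otherwise unchanged cluster object is assumed to be
    # enough to trigger a write-out and distribution of the configuration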
    self.cfg.Update(self.cfg.GetClusterInfo())


def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disks to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if rstats.failed or not rstats.data:
      lu.LogWarning("Can't get any data from node %s", node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.data
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                      node, instance.disks[i].iv_name)
        continue
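      # mstat is assumed to be a (perc_done, est_time, is_degraded, ldisk)
      # tuple; perc_done becomes None once this mirror is fully synced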
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

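    # sleep for the smaller of one minute and the worst-case sync
    # estimate before polling again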
    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)
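  # index into the blockdev_find result payload; positions 5 and 6 are
  # assumed to hold the is_degraded and ldisk status flags, respectively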
  if ldisk:
    idx = 6
  else:
    idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.RemoteFailMsg()
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      result = result and (not rstats.payload[idx])
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and list of OS objects as values, eg::

          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}
          }

    """
    all_os = {}
    for node_name, nr in rlist.iteritems():
      if nr.failed or not nr.data:
        continue
      for os_obj in nr.data:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in node_list:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()
                   if node in node_list]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(valid_nodes, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
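          # an OS is reported as valid only when every node returned at
          # least one (truthy) OS object for it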
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output


class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)


class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    "offline",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in _GetWantedNodes,
    # if non-empty; if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.failed and nodeinfo.data:
          nodeinfo = nodeinfo.data
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
            }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field == "master_candidate":
          val = node.master_candidate
        elif field == "master":
          val = node.name == master_node
        elif field == "offline":
          val = node.offline
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output


class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      if node not in volumes or volumes[node].failed or not volumes[node].data:
        continue

      node_vols = volumes[node].data[:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output


class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # check reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
    mc_now, _ = self.cfg.GetMasterCandidateStats()
    master_candidate = mc_now < cp_size

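    # e.g. with candidate_pool_size == 10 and 7 current candidates, the
    # new node joins as a master candidate; once the pool is full, it
    # joins as a regular node instead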
    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip,
                                 master_candidate=master_candidate,
                                 offline=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise()
    if result.data:
      if constants.PROTOCOL_VERSION == result.data:
        logging.info("Communication to node %s fine, sw version %s match",
                     node, result.data)
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result.data))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logging.info("Copy ssh key to node %s", node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot transfer ssh keys to the"
                               " new node: %s" % msg)

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      if result.failed or not result.data:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if result[verifier].failed or not result[verifier].data:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier].data['nodelist']:
        for failed in result[verifier].data['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier].data['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logging.debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed", fname, to_node)

    to_copy = []
    enabled_hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    if constants.HTS_USE_VNC.intersection(enabled_hypervisors):
      to_copy.append(constants.VNC_PASSWORD_FILE)

    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      if result[node].failed or not result[node]:
        logging.error("Could not copy file %s to node %s", fname, node)

    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)


class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    if self.op.master_candidate is None and self.op.offline is None:
      raise errors.OpPrereqError("Please pass at least one modification")
    if self.op.offline == True and self.op.master_candidate == True:
      raise errors.OpPrereqError("Can't set the node into offline and"
                                 " master_candidate at the same time")

  def ExpandNames(self):
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the requested modifications against the current node state.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if ((self.op.master_candidate == False or self.op.offline == True)
        and node.master_candidate):
      # we will demote the node from master_candidate
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master node has to be a"
                                   " master candidate and online")
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
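      # e.g. with a pool size of 10 and exactly 10 candidates, demoting
      # one would leave the pool undersized, so the force option is needed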
      if num_candidates <= cp_size:
        msg = ("Not enough master candidates (desired"
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
        if self.op.force:
          self.LogWarning(msg)
        else:
          raise errors.OpPrereqError(msg)

    if (self.op.master_candidate == True and node.offline and
        not self.op.offline == False):
      raise errors.OpPrereqError("Can't set an offline node to"
                                 " master_candidate")

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node

    result = []

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True and node.master_candidate:
        node.master_candidate = False
        result.append(("master_candidate", "auto-demotion due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        msg = rrc.RemoteFailMsg()
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    # this will trigger configuration file update, if needed
    self.cfg.Update(node)
    # this will trigger job queue propagation or cleanup
    if self.op.node_name != self.cfg.GetMasterNode():
      self.context.ReaddNode(node)

    return result


class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.default_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": dict([(hypervisor, cluster.hvparams[hypervisor])
                        for hypervisor in cluster.enabled_hypervisors]),
      "beparams": cluster.beparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      }

    return result


class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values


class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two-pass mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      if result.failed or not result:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1)",
                           inst_disk.iv_name, node)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      if result.failed or not result:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2)",
                           inst_disk.iv_name, node)
        disks_ok = False
    device_info.append((instance.primary_node, inst_disk.iv_name, result.data))

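  # each device_info entry maps an instance-visible disk name to the
  # device data reported by the primary node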
  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info


def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
                                           ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    """
    instance = self.instance
    _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
                                      [instance.hypervisor])
  ins_l = ins_l[instance.primary_node]
  if ins_l.failed or not isinstance(ins_l.data, list):
    raise errors.OpExecError("Can't contact node '%s'" %
                             instance.primary_node)

  if instance.name in ins_l.data:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)


def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are
  ignored.

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      if result.failed or not result.data:
        logging.error("Could not shutdown block device %s on node %s",
                      disk.iv_name, node)
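        # the failure only counts when it happens on a secondary node,
        # or on the primary without ignore_primary set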
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result


def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
  nodeinfo[node].Raise()
  free_mem = nodeinfo[node].data.get('memory_free')
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                             " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                             " needed %s MiB, available %s MiB" %
                             (node, reason, requested, free_mem))


class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

    _CheckNodeFreeMemory(self, instance.primary_node,
                         "starting instance %s" % instance.name,
                         bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")

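    # record the desired "up" state first; if the actual start below
    # fails, the cluster still knows the instance is meant to be running
    # (assumed rationale for the ordering)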
    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    result = self.rpc.call_instance_start(node_current, instance, extra_args)
    msg = result.RemoteFailMsg()
    if msg:
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance: %s" % msg)


class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

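    # soft and hard reboots are delegated to the hypervisor in a single
    # RPC; a full reboot is emulated below as stop + disk restart + start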
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type, extra_args)
      if result.failed or not result.data:
        raise errors.OpExecError("Could not reboot instance")
    else:
      if not self.rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, extra_args)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)


class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    self.cfg.MarkInstanceDown(instance.name)
    result = self.rpc.call_instance_shutdown(node_current, instance)
    if result.failed or not result.data:
      self.proc.LogWarning("Could not shutdown instance")

    _ShutdownInstanceDisks(self, instance)


class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info.failed or remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   self.op.pnode)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise()
      if not isinstance(result.data, objects.OS):
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

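    # the OS create scripts need the instance's disks assembled; the
    # finally clause below makes sure they are shut down again even on
    # failure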
    _StartInstanceDisks(self, inst, None)
2887
    try:
2888
      feedback_fn("Running the instance OS create scripts...")
2889
      result = self.rpc.call_instance_os_add(inst.primary_node, inst)
2890
      msg = result.RemoteFailMsg()
2891
      if msg:
2892
        raise errors.OpExecError("Could not install OS for instance %s"
2893
                                 " on node %s: %s" %
2894
                                 (inst.name, inst.primary_node, msg))
2895
    finally:
2896
      _ShutdownInstanceDisks(self, inst)


class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result.data[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.RemoteFailMsg()
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)
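

# Usage sketch for LURenameInstance (illustrative only; field names are
# assumed, values hypothetical):
#
#   op = opcodes.OpRenameInstance(instance_name="inst1.example.com",
#                                 new_name="inst2.example.com",
#                                 ignore_ip=False)
#
# With ignore_ip left False, CheckPrereq rejects the rename if the new
# name's IP already answers on the noded port.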


class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node only.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
    if result.failed or not result.data:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shut down instance")
      else:
        raise errors.OpExecError("Could not shut down instance %s on"
                                 " node %s" %
                                 (instance.name, instance.primary_node))

    logging.info("Removing block devices for instance %s", instance.name)

    if not _RemoveDisks(self, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logging.info("Removing instance %s from cluster config", instance.name)

    self.cfg.RemoveInstance(instance.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
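

# Usage sketch for LURemoveInstance (illustrative only; field names are
# assumed, values hypothetical):
#
#   op = opcodes.OpRemoveInstance(instance_name="inst1.example.com",
#                                 ignore_failures=True)
#
# With ignore_failures set, shutdown and disk-removal errors are reduced
# to warnings, so the instance is removed from the configuration anyway.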


class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state", "admin_ram",
                                    "disk_template", "ip", "mac", "bridge",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    "(disk).(size)/([0-9]+)",
                                    "(disk).(sizes)", "disk_usage",
                                    "(nic).(mac|ip|bridge)/([0-9]+)",
                                    "(nic).(macs|ips|bridges)",
                                    "(disk|nic).(count)",
                                    "serial_no", "hypervisor", "hvparams"] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
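
  # The parenthesized entries in _FIELDS_STATIC are regular expressions:
  # FieldSet matches a requested field against each pattern and Exec below
  # consumes the captured groups. For example (hypothetical query), the
  # field "nic.ip/1" matches "(nic).(mac|ip|bridge)/([0-9]+)" with groups
  # ("nic", "ip", "1") and resolves to instance.nics[1].ip.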

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of instances and their attributes.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # caller didn't specify instance names, so ordering is not important
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # caller did specify names, so we must keep the ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    off_nodes = []
    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.failed:
          bad_nodes.append(name)
        else:
          if result.data:
            live_data.update(result.data)
          # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "disk_usage": # total disk usage per node
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "bridges":
              val = [nic.bridge for nic in instance.nics]
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "bridge":
                  val = instance.nics[nic_idx].bridge
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, "Unhandled variable parameter"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output


class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master and the secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, bep[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One or more target bridges %s do not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    if result.failed or not result.data:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shut down instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down", instance.name, source_node,
                             source_node)
      else:
        raise errors.OpExecError("Could not shut down instance %s on"
                                 " node %s" % (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))
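

# Usage sketch for LUFailoverInstance (illustrative only; field names are
# assumed, values hypothetical):
#
#   op = opcodes.OpFailoverInstance(instance_name="inst1.example.com",
#                                   ignore_consistency=False)
#
# Failover shuts the instance down on the primary and restarts it on the
# secondary, so unlike the live migration below it always interrupts the
# instance.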


class LUMigrateInstance(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "live", "cleanup"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master and the secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "drbd8 disk template")

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    if result.failed or not result.data:
      raise errors.OpPrereqError("One or more target bridges %s do not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    if not self.op.cleanup:
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
                                   msg)

    self.instance = instance

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        msg = nres.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
                                   (node, msg))
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
                               " error %s" % (node, msg))

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot disconnect disks on node %s,"
                                 " error %s" % (node, msg))

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot change disks config on node %s,"
                                 " error: %s" % (node, msg))

  def _ExecCleanup(self):
    """Try to clean up after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise()
      if not isinstance(result.data, list):
        raise errors.OpExecError("Can't contact node '%s'" % node)

    runningon_source = instance.name in ins_l[source_node].data
    runningon_target = instance.name in ins_l[target_node].data

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore errors here, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.LogWarning("Migration failed and I can't reconnect the"
                      " drives: error '%s'\n"
                      "Please look and recover the instance status" %
                      str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.RemoteFailMsg()
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s" %
                    (target_node, abort_msg))
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.op.live)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s" % msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def Exec(self, feedback_fn):
    """Perform the migration.

    """
    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node
    self.target_node = self.instance.secondary_nodes[0]
    self.all_nodes = [self.source_node, self.target_node]
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }
    if self.op.cleanup:
      return self._ExecCleanup()
    else:
      return self._ExecMigration()
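

# For reference, a successful live migration drives DRBD through this
# sequence (a summary of _ExecMigration above, not additional behavior):
#
#   self._EnsureSecondary(target_node)  # close the devices on the target
#   self._GoStandalone()                # disconnect both sides
#   self._GoReconnect(True)             # reconnect in dual-master mode
#   self._WaitUntilSync()               # full resync before migrating
#   # ...migrate, then demote the old primary and repeat the
#   # standalone/reconnect cycle with multimaster=False.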


def _CreateBlockDev(lu, node, instance, device, force_create,
                    info, force_open):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device for which
      CreateOnSecondary() is true
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  if device.CreateOnSecondary():
    force_create = True

  if device.children:
    for child in device.children:
      _CreateBlockDev(lu, node, instance, child, force_create,
                      info, force_open)

  if not force_create:
    return

  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
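

# _CreateBlockDev recurses depth-first: children are created before their
# parent, and once a device reports CreateOnSecondary() the force_create
# flag stays set for everything below it, so e.g. a DRBD8 device has its
# two backing LVs created before the DRBD device itself.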


def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created in advance.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  lu.cfg.SetDiskID(device, node)
  result = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, force_open, info)
  msg = result.RemoteFailMsg()
  if msg:
    raise errors.OpExecError("Can't create block device %s on"
                             " node %s for instance %s: %s" %
                             (device, node, instance.name, msg))
  if device.physical_id is None:
    device.physical_id = result.payload


def _GenerateUniqueNames(lu, exts):
  """Generate suitable LV names.

  This will generate one logical volume name for each of the given
  extensions, each prefixed with a new unique ID.

  """
  results = []
  for val in exts:
    new_id = lu.cfg.GenerateUniqueID()
    results.append("%s%s" % (new_id, val))
  return results
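

# Example (the unique ID shown is hypothetical):
#
#   _GenerateUniqueNames(lu, [".disk0_data", ".disk0_meta"])
#   => ["3b2f...-uuid.disk0_data", "3b2f...-uuid.disk0_meta"]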


def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  shared_secret = lu.cfg.GenerateDRBDSecret()
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                          logical_id=(primary, secondary, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name)
  return drbd_dev
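

# The object returned by _GenerateDRBD8Branch is a two-level tree (shape
# taken from the constructor calls above; the port, minors and shared
# secret are allocated from the config):
#
#   Disk(LD_DRBD8, size=size,
#        logical_id=(primary, secondary, port, p_minor, s_minor, secret),
#        children=[Disk(LD_LV, size=size),   # data volume
#                  Disk(LD_LV, size=128)])   # metadata volume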


def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  """
  # TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % i
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vgname, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         idx)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
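

# disk_info is a list of {"size", "mode"} dicts as assembled by the
# instance-creation code; e.g. (hypothetical values)
# [{"size": 10240, "mode": "rw"}] with DT_DRBD8 produces one
# _GenerateDRBD8Branch call, consuming two LV names and two DRBD minors.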


def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name


def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @rtype: boolean
  @return: the success of the creation

  """
  info = _GetInstanceInfoText(instance)
  pnode = instance.primary_node

  if instance.disk_template == constants.DT_FILE: