Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 4978db17

History | View | Annotate | Download (233.5 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module implementing the master-side code."""
23

    
24
# pylint: disable-msg=W0613,W0201
25

    
26
import os
27
import os.path
28
import sha
29
import time
30
import tempfile
31
import re
32
import platform
33
import logging
34
import copy
35
import random
36

    
37
from ganeti import ssh
38
from ganeti import utils
39
from ganeti import errors
40
from ganeti import hypervisor
41
from ganeti import locking
42
from ganeti import constants
43
from ganeti import objects
44
from ganeti import opcodes
45
from ganeti import serializer
46
from ganeti import ssconf
47

    
48

    
49
class LogicalUnit(object):
50
  """Logical Unit base class.
51

52
  Subclasses must follow these rules:
53
    - implement ExpandNames
54
    - implement CheckPrereq
55
    - implement Exec
56
    - implement BuildHooksEnv
57
    - redefine HPATH and HTYPE
58
    - optionally redefine their run requirements:
59
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
60

61
  Note that all commands require root permissions.
62

63
  """
64
  HPATH = None
65
  HTYPE = None
66
  _OP_REQP = []
67
  REQ_BGL = True
68

    
69
  def __init__(self, processor, op, context, rpc):
70
    """Constructor for LogicalUnit.
71

72
    This needs to be overriden in derived classes in order to check op
73
    validity.
74

75
    """
76
    self.proc = processor
77
    self.op = op
78
    self.cfg = context.cfg
79
    self.context = context
80
    self.rpc = rpc
81
    # Dicts used to declare locking needs to mcpu
82
    self.needed_locks = None
83
    self.acquired_locks = {}
84
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
85
    self.add_locks = {}
86
    self.remove_locks = {}
87
    # Used to force good behavior when calling helper functions
88
    self.recalculate_locks = {}
89
    self.__ssh = None
90
    # logging
91
    self.LogWarning = processor.LogWarning
92
    self.LogInfo = processor.LogInfo
93

    
94
    for attr_name in self._OP_REQP:
95
      attr_val = getattr(op, attr_name, None)
96
      if attr_val is None:
97
        raise errors.OpPrereqError("Required parameter '%s' missing" %
98
                                   attr_name)
99
    self.CheckArguments()
100

    
101
  def __GetSSH(self):
102
    """Returns the SshRunner object
103

104
    """
105
    if not self.__ssh:
106
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
107
    return self.__ssh
108

    
109
  ssh = property(fget=__GetSSH)
110

    
111
  def CheckArguments(self):
112
    """Check syntactic validity for the opcode arguments.
113

114
    This method is for doing a simple syntactic check and ensure
115
    validity of opcode parameters, without any cluster-related
116
    checks. While the same can be accomplished in ExpandNames and/or
117
    CheckPrereq, doing these separate is better because:
118

119
      - ExpandNames is left as as purely a lock-related function
120
      - CheckPrereq is run after we have aquired locks (and possible
121
        waited for them)
122

123
    The function is allowed to change the self.op attribute so that
124
    later methods can no longer worry about missing parameters.
125

126
    """
127
    pass
128

    
129
  def ExpandNames(self):
130
    """Expand names for this LU.
131

132
    This method is called before starting to execute the opcode, and it should
133
    update all the parameters of the opcode to their canonical form (e.g. a
134
    short node name must be fully expanded after this method has successfully
135
    completed). This way locking, hooks, logging, ecc. can work correctly.
136

137
    LUs which implement this method must also populate the self.needed_locks
138
    member, as a dict with lock levels as keys, and a list of needed lock names
139
    as values. Rules:
140

141
      - use an empty dict if you don't need any lock
142
      - if you don't need any lock at a particular level omit that level
143
      - don't put anything for the BGL level
144
      - if you want all locks at a level use locking.ALL_SET as a value
145

146
    If you need to share locks (rather than acquire them exclusively) at one
147
    level you can modify self.share_locks, setting a true value (usually 1) for
148
    that level. By default locks are not shared.
149

150
    Examples::
151

152
      # Acquire all nodes and one instance
153
      self.needed_locks = {
154
        locking.LEVEL_NODE: locking.ALL_SET,
155
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
156
      }
157
      # Acquire just two nodes
158
      self.needed_locks = {
159
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
160
      }
161
      # Acquire no locks
162
      self.needed_locks = {} # No, you can't leave it to the default value None
163

164
    """
165
    # The implementation of this method is mandatory only if the new LU is
166
    # concurrent, so that old LUs don't need to be changed all at the same
167
    # time.
168
    if self.REQ_BGL:
169
      self.needed_locks = {} # Exclusive LUs don't need locks.
170
    else:
171
      raise NotImplementedError
172

    
173
  def DeclareLocks(self, level):
174
    """Declare LU locking needs for a level
175

176
    While most LUs can just declare their locking needs at ExpandNames time,
177
    sometimes there's the need to calculate some locks after having acquired
178
    the ones before. This function is called just before acquiring locks at a
179
    particular level, but after acquiring the ones at lower levels, and permits
180
    such calculations. It can be used to modify self.needed_locks, and by
181
    default it does nothing.
182

183
    This function is only called if you have something already set in
184
    self.needed_locks for the level.
185

186
    @param level: Locking level which is going to be locked
187
    @type level: member of ganeti.locking.LEVELS
188

189
    """
190

    
191
  def CheckPrereq(self):
192
    """Check prerequisites for this LU.
193

194
    This method should check that the prerequisites for the execution
195
    of this LU are fulfilled. It can do internode communication, but
196
    it should be idempotent - no cluster or system changes are
197
    allowed.
198

199
    The method should raise errors.OpPrereqError in case something is
200
    not fulfilled. Its return value is ignored.
201

202
    This method should also update all the parameters of the opcode to
203
    their canonical form if it hasn't been done by ExpandNames before.
204

205
    """
206
    raise NotImplementedError
207

    
208
  def Exec(self, feedback_fn):
209
    """Execute the LU.
210

211
    This method should implement the actual work. It should raise
212
    errors.OpExecError for failures that are somewhat dealt with in
213
    code, or expected.
214

215
    """
216
    raise NotImplementedError
217

    
218
  def BuildHooksEnv(self):
219
    """Build hooks environment for this LU.
220

221
    This method should return a three-node tuple consisting of: a dict
222
    containing the environment that will be used for running the
223
    specific hook for this LU, a list of node names on which the hook
224
    should run before the execution, and a list of node names on which
225
    the hook should run after the execution.
226

227
    The keys of the dict must not have 'GANETI_' prefixed as this will
228
    be handled in the hooks runner. Also note additional keys will be
229
    added by the hooks runner. If the LU doesn't define any
230
    environment, an empty dict (and not None) should be returned.
231

232
    No nodes should be returned as an empty list (and not None).
233

234
    Note that if the HPATH for a LU class is None, this function will
235
    not be called.
236

237
    """
238
    raise NotImplementedError
239

    
240
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
241
    """Notify the LU about the results of its hooks.
242

243
    This method is called every time a hooks phase is executed, and notifies
244
    the Logical Unit about the hooks' result. The LU can then use it to alter
245
    its result based on the hooks.  By default the method does nothing and the
246
    previous result is passed back unchanged but any LU can define it if it
247
    wants to use the local cluster hook-scripts somehow.
248

249
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
250
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
251
    @param hook_results: the results of the multi-node hooks rpc call
252
    @param feedback_fn: function used send feedback back to the caller
253
    @param lu_result: the previous Exec result this LU had, or None
254
        in the PRE phase
255
    @return: the new Exec result, based on the previous result
256
        and hook results
257

258
    """
259
    return lu_result
260

    
261
  def _ExpandAndLockInstance(self):
262
    """Helper function to expand and lock an instance.
263

264
    Many LUs that work on an instance take its name in self.op.instance_name
265
    and need to expand it and then declare the expanded name for locking. This
266
    function does it, and then updates self.op.instance_name to the expanded
267
    name. It also initializes needed_locks as a dict, if this hasn't been done
268
    before.
269

270
    """
271
    if self.needed_locks is None:
272
      self.needed_locks = {}
273
    else:
274
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
275
        "_ExpandAndLockInstance called with instance-level locks set"
276
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
277
    if expanded_name is None:
278
      raise errors.OpPrereqError("Instance '%s' not known" %
279
                                  self.op.instance_name)
280
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
281
    self.op.instance_name = expanded_name
282

    
283
  def _LockInstancesNodes(self, primary_only=False):
284
    """Helper function to declare instances' nodes for locking.
285

286
    This function should be called after locking one or more instances to lock
287
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
288
    with all primary or secondary nodes for instances already locked and
289
    present in self.needed_locks[locking.LEVEL_INSTANCE].
290

291
    It should be called from DeclareLocks, and for safety only works if
292
    self.recalculate_locks[locking.LEVEL_NODE] is set.
293

294
    In the future it may grow parameters to just lock some instance's nodes, or
295
    to just lock primaries or secondary nodes, if needed.
296

297
    If should be called in DeclareLocks in a way similar to::
298

299
      if level == locking.LEVEL_NODE:
300
        self._LockInstancesNodes()
301

302
    @type primary_only: boolean
303
    @param primary_only: only lock primary nodes of locked instances
304

305
    """
306
    assert locking.LEVEL_NODE in self.recalculate_locks, \
307
      "_LockInstancesNodes helper function called with no nodes to recalculate"
308

    
309
    # TODO: check if we're really been called with the instance locks held
310

    
311
    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
312
    # future we might want to have different behaviors depending on the value
313
    # of self.recalculate_locks[locking.LEVEL_NODE]
314
    wanted_nodes = []
315
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
316
      instance = self.context.cfg.GetInstanceInfo(instance_name)
317
      wanted_nodes.append(instance.primary_node)
318
      if not primary_only:
319
        wanted_nodes.extend(instance.secondary_nodes)
320

    
321
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
322
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
323
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
324
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
325

    
326
    del self.recalculate_locks[locking.LEVEL_NODE]
327

    
328

    
329
class NoHooksLU(LogicalUnit):
330
  """Simple LU which runs no hooks.
331

332
  This LU is intended as a parent for other LogicalUnits which will
333
  run no hooks, in order to reduce duplicate code.
334

335
  """
336
  HPATH = None
337
  HTYPE = None
338

    
339

    
340
def _GetWantedNodes(lu, nodes):
341
  """Returns list of checked and expanded node names.
342

343
  @type lu: L{LogicalUnit}
344
  @param lu: the logical unit on whose behalf we execute
345
  @type nodes: list
346
  @param nodes: list of node names or None for all nodes
347
  @rtype: list
348
  @return: the list of nodes, sorted
349
  @raise errors.OpProgrammerError: if the nodes parameter is wrong type
350

351
  """
352
  if not isinstance(nodes, list):
353
    raise errors.OpPrereqError("Invalid argument type 'nodes'")
354

    
355
  if not nodes:
356
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
357
      " non-empty list of nodes whose name is to be expanded.")
358

    
359
  wanted = []
360
  for name in nodes:
361
    node = lu.cfg.ExpandNodeName(name)
362
    if node is None:
363
      raise errors.OpPrereqError("No such node name '%s'" % name)
364
    wanted.append(node)
365

    
366
  return utils.NiceSort(wanted)
367

    
368

    
369
def _GetWantedInstances(lu, instances):
370
  """Returns list of checked and expanded instance names.
371

372
  @type lu: L{LogicalUnit}
373
  @param lu: the logical unit on whose behalf we execute
374
  @type instances: list
375
  @param instances: list of instance names or None for all instances
376
  @rtype: list
377
  @return: the list of instances, sorted
378
  @raise errors.OpPrereqError: if the instances parameter is wrong type
379
  @raise errors.OpPrereqError: if any of the passed instances is not found
380

381
  """
382
  if not isinstance(instances, list):
383
    raise errors.OpPrereqError("Invalid argument type 'instances'")
384

    
385
  if instances:
386
    wanted = []
387

    
388
    for name in instances:
389
      instance = lu.cfg.ExpandInstanceName(name)
390
      if instance is None:
391
        raise errors.OpPrereqError("No such instance name '%s'" % name)
392
      wanted.append(instance)
393

    
394
  else:
395
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
396
  return wanted
397

    
398

    
399
def _CheckOutputFields(static, dynamic, selected):
400
  """Checks whether all selected fields are valid.
401

402
  @type static: L{utils.FieldSet}
403
  @param static: static fields set
404
  @type dynamic: L{utils.FieldSet}
405
  @param dynamic: dynamic fields set
406

407
  """
408
  f = utils.FieldSet()
409
  f.Extend(static)
410
  f.Extend(dynamic)
411

    
412
  delta = f.NonMatching(selected)
413
  if delta:
414
    raise errors.OpPrereqError("Unknown output fields selected: %s"
415
                               % ",".join(delta))
416

    
417

    
418
def _CheckBooleanOpField(op, name):
419
  """Validates boolean opcode parameters.
420

421
  This will ensure that an opcode parameter is either a boolean value,
422
  or None (but that it always exists).
423

424
  """
425
  val = getattr(op, name, None)
426
  if not (val is None or isinstance(val, bool)):
427
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
428
                               (name, str(val)))
429
  setattr(op, name, val)
430

    
431

    
432
def _CheckNodeOnline(lu, node):
433
  """Ensure that a given node is online.
434

435
  @param lu: the LU on behalf of which we make the check
436
  @param node: the node to check
437
  @raise errors.OpPrereqError: if the nodes is offline
438

439
  """
440
  if lu.cfg.GetNodeInfo(node).offline:
441
    raise errors.OpPrereqError("Can't use offline node %s" % node)
442

    
443

    
444
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
445
                          memory, vcpus, nics):
446
  """Builds instance related env variables for hooks
447

448
  This builds the hook environment from individual variables.
449

450
  @type name: string
451
  @param name: the name of the instance
452
  @type primary_node: string
453
  @param primary_node: the name of the instance's primary node
454
  @type secondary_nodes: list
455
  @param secondary_nodes: list of secondary nodes as strings
456
  @type os_type: string
457
  @param os_type: the name of the instance's OS
458
  @type status: boolean
459
  @param status: the should_run status of the instance
460
  @type memory: string
461
  @param memory: the memory size of the instance
462
  @type vcpus: string
463
  @param vcpus: the count of VCPUs the instance has
464
  @type nics: list
465
  @param nics: list of tuples (ip, bridge, mac) representing
466
      the NICs the instance  has
467
  @rtype: dict
468
  @return: the hook environment for this instance
469

470
  """
471
  if status:
472
    str_status = "up"
473
  else:
474
    str_status = "down"
475
  env = {
476
    "OP_TARGET": name,
477
    "INSTANCE_NAME": name,
478
    "INSTANCE_PRIMARY": primary_node,
479
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
480
    "INSTANCE_OS_TYPE": os_type,
481
    "INSTANCE_STATUS": str_status,
482
    "INSTANCE_MEMORY": memory,
483
    "INSTANCE_VCPUS": vcpus,
484
  }
485

    
486
  if nics:
487
    nic_count = len(nics)
488
    for idx, (ip, bridge, mac) in enumerate(nics):
489
      if ip is None:
490
        ip = ""
491
      env["INSTANCE_NIC%d_IP" % idx] = ip
492
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
493
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
494
  else:
495
    nic_count = 0
496

    
497
  env["INSTANCE_NIC_COUNT"] = nic_count
498

    
499
  return env
500

    
501

    
502
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
503
  """Builds instance related env variables for hooks from an object.
504

505
  @type lu: L{LogicalUnit}
506
  @param lu: the logical unit on whose behalf we execute
507
  @type instance: L{objects.Instance}
508
  @param instance: the instance for which we should build the
509
      environment
510
  @type override: dict
511
  @param override: dictionary with key/values that will override
512
      our values
513
  @rtype: dict
514
  @return: the hook environment dictionary
515

516
  """
517
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
518
  args = {
519
    'name': instance.name,
520
    'primary_node': instance.primary_node,
521
    'secondary_nodes': instance.secondary_nodes,
522
    'os_type': instance.os,
523
    'status': instance.admin_up,
524
    'memory': bep[constants.BE_MEMORY],
525
    'vcpus': bep[constants.BE_VCPUS],
526
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
527
  }
528
  if override:
529
    args.update(override)
530
  return _BuildInstanceHookEnv(**args)
531

    
532

    
533
def _AdjustCandidatePool(lu):
534
  """Adjust the candidate pool after node operations.
535

536
  """
537
  mod_list = lu.cfg.MaintainCandidatePool()
538
  if mod_list:
539
    lu.LogInfo("Promoted nodes to master candidate role: %s",
540
               ", ".join(node.name for node in mod_list))
541
    for name in mod_list:
542
      lu.context.ReaddNode(name)
543
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
544
  if mc_now > mc_max:
545
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
546
               (mc_now, mc_max))
547

    
548

    
549
def _CheckInstanceBridgesExist(lu, instance):
550
  """Check that the brigdes needed by an instance exist.
551

552
  """
553
  # check bridges existance
554
  brlist = [nic.bridge for nic in instance.nics]
555
  result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
556
  result.Raise()
557
  if not result.data:
558
    raise errors.OpPrereqError("One or more target bridges %s does not"
559
                               " exist on destination node '%s'" %
560
                               (brlist, instance.primary_node))
561

    
562

    
563
class LUDestroyCluster(NoHooksLU):
564
  """Logical unit for destroying the cluster.
565

566
  """
567
  _OP_REQP = []
568

    
569
  def CheckPrereq(self):
570
    """Check prerequisites.
571

572
    This checks whether the cluster is empty.
573

574
    Any errors are signalled by raising errors.OpPrereqError.
575

576
    """
577
    master = self.cfg.GetMasterNode()
578

    
579
    nodelist = self.cfg.GetNodeList()
580
    if len(nodelist) != 1 or nodelist[0] != master:
581
      raise errors.OpPrereqError("There are still %d node(s) in"
582
                                 " this cluster." % (len(nodelist) - 1))
583
    instancelist = self.cfg.GetInstanceList()
584
    if instancelist:
585
      raise errors.OpPrereqError("There are still %d instance(s) in"
586
                                 " this cluster." % len(instancelist))
587

    
588
  def Exec(self, feedback_fn):
589
    """Destroys the cluster.
590

591
    """
592
    master = self.cfg.GetMasterNode()
593
    result = self.rpc.call_node_stop_master(master, False)
594
    result.Raise()
595
    if not result.data:
596
      raise errors.OpExecError("Could not disable the master role")
597
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
598
    utils.CreateBackup(priv_key)
599
    utils.CreateBackup(pub_key)
600
    return master
601

    
602

    
603
class LUVerifyCluster(LogicalUnit):
604
  """Verifies the cluster status.
605

606
  """
607
  HPATH = "cluster-verify"
608
  HTYPE = constants.HTYPE_CLUSTER
609
  _OP_REQP = ["skip_checks"]
610
  REQ_BGL = False
611

    
612
  def ExpandNames(self):
613
    self.needed_locks = {
614
      locking.LEVEL_NODE: locking.ALL_SET,
615
      locking.LEVEL_INSTANCE: locking.ALL_SET,
616
    }
617
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
618

    
619
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
620
                  node_result, feedback_fn, master_files,
621
                  drbd_map):
622
    """Run multiple tests against a node.
623

624
    Test list:
625

626
      - compares ganeti version
627
      - checks vg existance and size > 20G
628
      - checks config file checksum
629
      - checks ssh to other nodes
630

631
    @type nodeinfo: L{objects.Node}
632
    @param nodeinfo: the node to check
633
    @param file_list: required list of files
634
    @param local_cksum: dictionary of local files and their checksums
635
    @param node_result: the results from the node
636
    @param feedback_fn: function used to accumulate results
637
    @param master_files: list of files that only masters should have
638
    @param drbd_map: the useddrbd minors for this node, in
639
        form of minor: (instance, must_exist) which correspond to instances
640
        and their running status
641

642
    """
643
    node = nodeinfo.name
644

    
645
    # main result, node_result should be a non-empty dict
646
    if not node_result or not isinstance(node_result, dict):
647
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
648
      return True
649

    
650
    # compares ganeti version
651
    local_version = constants.PROTOCOL_VERSION
652
    remote_version = node_result.get('version', None)
653
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
654
            len(remote_version) == 2):
655
      feedback_fn("  - ERROR: connection to %s failed" % (node))
656
      return True
657

    
658
    if local_version != remote_version[0]:
659
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
660
                  " node %s %s" % (local_version, node, remote_version[0]))
661
      return True
662

    
663
    # node seems compatible, we can actually try to look into its results
664

    
665
    bad = False
666

    
667
    # full package version
668
    if constants.RELEASE_VERSION != remote_version[1]:
669
      feedback_fn("  - WARNING: software version mismatch: master %s,"
670
                  " node %s %s" %
671
                  (constants.RELEASE_VERSION, node, remote_version[1]))
672

    
673
    # checks vg existence and size > 20G
674

    
675
    vglist = node_result.get(constants.NV_VGLIST, None)
676
    if not vglist:
677
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
678
                      (node,))
679
      bad = True
680
    else:
681
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
682
                                            constants.MIN_VG_SIZE)
683
      if vgstatus:
684
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
685
        bad = True
686

    
687
    # checks config file checksum
688

    
689
    remote_cksum = node_result.get(constants.NV_FILELIST, None)
690
    if not isinstance(remote_cksum, dict):
691
      bad = True
692
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
693
    else:
694
      for file_name in file_list:
695
        node_is_mc = nodeinfo.master_candidate
696
        must_have_file = file_name not in master_files
697
        if file_name not in remote_cksum:
698
          if node_is_mc or must_have_file:
699
            bad = True
700
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
701
        elif remote_cksum[file_name] != local_cksum[file_name]:
702
          if node_is_mc or must_have_file:
703
            bad = True
704
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
705
          else:
706
            # not candidate and this is not a must-have file
707
            bad = True
708
            feedback_fn("  - ERROR: non master-candidate has old/wrong file"
709
                        " '%s'" % file_name)
710
        else:
711
          # all good, except non-master/non-must have combination
712
          if not node_is_mc and not must_have_file:
713
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
714
                        " candidates" % file_name)
715

    
716
    # checks ssh to any
717

    
718
    if constants.NV_NODELIST not in node_result:
719
      bad = True
720
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
721
    else:
722
      if node_result[constants.NV_NODELIST]:
723
        bad = True
724
        for node in node_result[constants.NV_NODELIST]:
725
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
726
                          (node, node_result[constants.NV_NODELIST][node]))
727

    
728
    if constants.NV_NODENETTEST not in node_result:
729
      bad = True
730
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
731
    else:
732
      if node_result[constants.NV_NODENETTEST]:
733
        bad = True
734
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
735
        for node in nlist:
736
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
737
                          (node, node_result[constants.NV_NODENETTEST][node]))
738

    
739
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
740
    if isinstance(hyp_result, dict):
741
      for hv_name, hv_result in hyp_result.iteritems():
742
        if hv_result is not None:
743
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
744
                      (hv_name, hv_result))
745

    
746
    # check used drbd list
747
    used_minors = node_result.get(constants.NV_DRBDLIST, [])
748
    for minor, (iname, must_exist) in drbd_map.items():
749
      if minor not in used_minors and must_exist:
750
        feedback_fn("  - ERROR: drbd minor %d of instance %s is not active" %
751
                    (minor, iname))
752
        bad = True
753
    for minor in used_minors:
754
      if minor not in drbd_map:
755
        feedback_fn("  - ERROR: unallocated drbd minor %d is in use" % minor)
756
        bad = True
757

    
758
    return bad
759

    
760
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
761
                      node_instance, feedback_fn, n_offline):
762
    """Verify an instance.
763

764
    This function checks to see if the required block devices are
765
    available on the instance's node.
766

767
    """
768
    bad = False
769

    
770
    node_current = instanceconfig.primary_node
771

    
772
    node_vol_should = {}
773
    instanceconfig.MapLVsByNode(node_vol_should)
774

    
775
    for node in node_vol_should:
776
      if node in n_offline:
777
        # ignore missing volumes on offline nodes
778
        continue
779
      for volume in node_vol_should[node]:
780
        if node not in node_vol_is or volume not in node_vol_is[node]:
781
          feedback_fn("  - ERROR: volume %s missing on node %s" %
782
                          (volume, node))
783
          bad = True
784

    
785
    if instanceconfig.admin_up:
786
      if ((node_current not in node_instance or
787
          not instance in node_instance[node_current]) and
788
          node_current not in n_offline):
789
        feedback_fn("  - ERROR: instance %s not running on node %s" %
790
                        (instance, node_current))
791
        bad = True
792

    
793
    for node in node_instance:
794
      if (not node == node_current):
795
        if instance in node_instance[node]:
796
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
797
                          (instance, node))
798
          bad = True
799

    
800
    return bad
801

    
802
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
803
    """Verify if there are any unknown volumes in the cluster.
804

805
    The .os, .swap and backup volumes are ignored. All other volumes are
806
    reported as unknown.
807

808
    """
809
    bad = False
810

    
811
    for node in node_vol_is:
812
      for volume in node_vol_is[node]:
813
        if node not in node_vol_should or volume not in node_vol_should[node]:
814
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
815
                      (volume, node))
816
          bad = True
817
    return bad
818

    
819
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
820
    """Verify the list of running instances.
821

822
    This checks what instances are running but unknown to the cluster.
823

824
    """
825
    bad = False
826
    for node in node_instance:
827
      for runninginstance in node_instance[node]:
828
        if runninginstance not in instancelist:
829
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
830
                          (runninginstance, node))
831
          bad = True
832
    return bad
833

    
834
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
835
    """Verify N+1 Memory Resilience.
836

837
    Check that if one single node dies we can still start all the instances it
838
    was primary for.
839

840
    """
841
    bad = False
842

    
843
    for node, nodeinfo in node_info.iteritems():
844
      # This code checks that every node which is now listed as secondary has
845
      # enough memory to host all instances it is supposed to should a single
846
      # other node in the cluster fail.
847
      # FIXME: not ready for failover to an arbitrary node
848
      # FIXME: does not support file-backed instances
849
      # WARNING: we currently take into account down instances as well as up
850
      # ones, considering that even if they're down someone might want to start
851
      # them even in the event of a node failure.
852
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
853
        needed_mem = 0
854
        for instance in instances:
855
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
856
          if bep[constants.BE_AUTO_BALANCE]:
857
            needed_mem += bep[constants.BE_MEMORY]
858
        if nodeinfo['mfree'] < needed_mem:
859
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
860
                      " failovers should node %s fail" % (node, prinode))
861
          bad = True
862
    return bad
863

    
864
  def CheckPrereq(self):
865
    """Check prerequisites.
866

867
    Transform the list of checks we're going to skip into a set and check that
868
    all its members are valid.
869

870
    """
871
    self.skip_set = frozenset(self.op.skip_checks)
872
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
873
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
874

    
875
  def BuildHooksEnv(self):
876
    """Build hooks env.
877

878
    Cluster-Verify hooks just rone in the post phase and their failure makes
879
    the output be logged in the verify output and the verification to fail.
880

881
    """
882
    all_nodes = self.cfg.GetNodeList()
883
    # TODO: populate the environment with useful information for verify hooks
884
    env = {}
885
    return env, [], all_nodes
886

    
887
  def Exec(self, feedback_fn):
888
    """Verify integrity of cluster, performing various test on nodes.
889

890
    """
891
    bad = False
892
    feedback_fn("* Verifying global settings")
893
    for msg in self.cfg.VerifyConfig():
894
      feedback_fn("  - ERROR: %s" % msg)
895

    
896
    vg_name = self.cfg.GetVGName()
897
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
898
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
899
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
900
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
901
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
902
                        for iname in instancelist)
903
    i_non_redundant = [] # Non redundant instances
904
    i_non_a_balanced = [] # Non auto-balanced instances
905
    n_offline = [] # List of offline nodes
906
    node_volume = {}
907
    node_instance = {}
908
    node_info = {}
909
    instance_cfg = {}
910

    
911
    # FIXME: verify OS list
912
    # do local checksums
913
    master_files = [constants.CLUSTER_CONF_FILE]
914

    
915
    file_names = ssconf.SimpleStore().GetFileList()
916
    file_names.append(constants.SSL_CERT_FILE)
917
    file_names.append(constants.RAPI_CERT_FILE)
918
    file_names.extend(master_files)
919

    
920
    local_checksums = utils.FingerprintFiles(file_names)
921

    
922
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
923
    node_verify_param = {
924
      constants.NV_FILELIST: file_names,
925
      constants.NV_NODELIST: [node.name for node in nodeinfo
926
                              if not node.offline],
927
      constants.NV_HYPERVISOR: hypervisors,
928
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
929
                                  node.secondary_ip) for node in nodeinfo
930
                                 if not node.offline],
931
      constants.NV_LVLIST: vg_name,
932
      constants.NV_INSTANCELIST: hypervisors,
933
      constants.NV_VGLIST: None,
934
      constants.NV_VERSION: None,
935
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
936
      constants.NV_DRBDLIST: None,
937
      }
938
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
939
                                           self.cfg.GetClusterName())
940

    
941
    cluster = self.cfg.GetClusterInfo()
942
    master_node = self.cfg.GetMasterNode()
943
    all_drbd_map = self.cfg.ComputeDRBDMap()
944

    
945
    for node_i in nodeinfo:
946
      node = node_i.name
947
      nresult = all_nvinfo[node].data
948

    
949
      if node_i.offline:
950
        feedback_fn("* Skipping offline node %s" % (node,))
951
        n_offline.append(node)
952
        continue
953

    
954
      if node == master_node:
955
        ntype = "master"
956
      elif node_i.master_candidate:
957
        ntype = "master candidate"
958
      else:
959
        ntype = "regular"
960
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))
961

    
962
      if all_nvinfo[node].failed or not isinstance(nresult, dict):
963
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
964
        bad = True
965
        continue
966

    
967
      node_drbd = {}
968
      for minor, instance in all_drbd_map[node].items():
969
        instance = instanceinfo[instance]
970
        node_drbd[minor] = (instance.name, instance.admin_up)
971
      result = self._VerifyNode(node_i, file_names, local_checksums,
972
                                nresult, feedback_fn, master_files,
973
                                node_drbd)
974
      bad = bad or result
975

    
976
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
977
      if isinstance(lvdata, basestring):
978
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
979
                    (node, utils.SafeEncode(lvdata)))
980
        bad = True
981
        node_volume[node] = {}
982
      elif not isinstance(lvdata, dict):
983
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
984
        bad = True
985
        continue
986
      else:
987
        node_volume[node] = lvdata
988

    
989
      # node_instance
990
      idata = nresult.get(constants.NV_INSTANCELIST, None)
991
      if not isinstance(idata, list):
992
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
993
                    (node,))
994
        bad = True
995
        continue
996

    
997
      node_instance[node] = idata
998

    
999
      # node_info
1000
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1001
      if not isinstance(nodeinfo, dict):
1002
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
1003
        bad = True
1004
        continue
1005

    
1006
      try:
1007
        node_info[node] = {
1008
          "mfree": int(nodeinfo['memory_free']),
1009
          "dfree": int(nresult[constants.NV_VGLIST][vg_name]),
1010
          "pinst": [],
1011
          "sinst": [],
1012
          # dictionary holding all instances this node is secondary for,
1013
          # grouped by their primary node. Each key is a cluster node, and each
1014
          # value is a list of instances which have the key as primary and the
1015
          # current node as secondary.  this is handy to calculate N+1 memory
1016
          # availability if you can only failover from a primary to its
1017
          # secondary.
1018
          "sinst-by-pnode": {},
1019
        }
1020
      except ValueError:
1021
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
1022
        bad = True
1023
        continue
1024

    
1025
    node_vol_should = {}
1026

    
1027
    for instance in instancelist:
1028
      feedback_fn("* Verifying instance %s" % instance)
1029
      inst_config = instanceinfo[instance]
1030
      result =  self._VerifyInstance(instance, inst_config, node_volume,
1031
                                     node_instance, feedback_fn, n_offline)
1032
      bad = bad or result
1033
      inst_nodes_offline = []
1034

    
1035
      inst_config.MapLVsByNode(node_vol_should)
1036

    
1037
      instance_cfg[instance] = inst_config
1038

    
1039
      pnode = inst_config.primary_node
1040
      if pnode in node_info:
1041
        node_info[pnode]['pinst'].append(instance)
1042
      elif pnode not in n_offline:
1043
        feedback_fn("  - ERROR: instance %s, connection to primary node"
1044
                    " %s failed" % (instance, pnode))
1045
        bad = True
1046

    
1047
      if pnode in n_offline:
1048
        inst_nodes_offline.append(pnode)
1049

    
1050
      # If the instance is non-redundant we cannot survive losing its primary
1051
      # node, so we are not N+1 compliant. On the other hand we have no disk
1052
      # templates with more than one secondary so that situation is not well
1053
      # supported either.
1054
      # FIXME: does not support file-backed instances
1055
      if len(inst_config.secondary_nodes) == 0:
1056
        i_non_redundant.append(instance)
1057
      elif len(inst_config.secondary_nodes) > 1:
1058
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
1059
                    % instance)
1060

    
1061
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1062
        i_non_a_balanced.append(instance)
1063

    
1064
      for snode in inst_config.secondary_nodes:
1065
        if snode in node_info:
1066
          node_info[snode]['sinst'].append(instance)
1067
          if pnode not in node_info[snode]['sinst-by-pnode']:
1068
            node_info[snode]['sinst-by-pnode'][pnode] = []
1069
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1070
        elif snode not in n_offline:
1071
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
1072
                      " %s failed" % (instance, snode))
1073
          bad = True
1074
        if snode in n_offline:
1075
          inst_nodes_offline.append(snode)
1076

    
1077
      if inst_nodes_offline:
1078
        # warn that the instance lives on offline nodes, and set bad=True
1079
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
1080
                    ", ".join(inst_nodes_offline))
1081
        bad = True
1082

    
1083
    feedback_fn("* Verifying orphan volumes")
1084
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1085
                                       feedback_fn)
1086
    bad = bad or result
1087

    
1088
    feedback_fn("* Verifying remaining instances")
1089
    result = self._VerifyOrphanInstances(instancelist, node_instance,
1090
                                         feedback_fn)
1091
    bad = bad or result
1092

    
1093
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1094
      feedback_fn("* Verifying N+1 Memory redundancy")
1095
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1096
      bad = bad or result
1097

    
1098
    feedback_fn("* Other Notes")
1099
    if i_non_redundant:
1100
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1101
                  % len(i_non_redundant))
1102

    
1103
    if i_non_a_balanced:
1104
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1105
                  % len(i_non_a_balanced))
1106

    
1107
    if n_offline:
1108
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1109

    
1110
    return not bad
1111

    
1112
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
1113
    """Analize the post-hooks' result
1114

1115
    This method analyses the hook result, handles it, and sends some
1116
    nicely-formatted feedback back to the user.
1117

1118
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
1119
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
1120
    @param hooks_results: the results of the multi-node hooks rpc call
1121
    @param feedback_fn: function used send feedback back to the caller
1122
    @param lu_result: previous Exec result
1123
    @return: the new Exec result, based on the previous result
1124
        and hook results
1125

1126
    """
1127
    # We only really run POST phase hooks, and are only interested in
1128
    # their results
1129
    if phase == constants.HOOKS_PHASE_POST:
1130
      # Used to change hooks' output to proper indentation
1131
      indent_re = re.compile('^', re.M)
1132
      feedback_fn("* Hooks Results")
1133
      if not hooks_results:
1134
        feedback_fn("  - ERROR: general communication failure")
1135
        lu_result = 1
1136
      else:
1137
        for node_name in hooks_results:
1138
          show_node_header = True
1139
          res = hooks_results[node_name]
1140
          if res.failed or res.data is False or not isinstance(res.data, list):
1141
            if res.offline:
1142
              # no need to warn or set fail return value
1143
              continue
1144
            feedback_fn("    Communication failure in hooks execution")
1145
            lu_result = 1
1146
            continue
1147
          for script, hkr, output in res.data:
1148
            if hkr == constants.HKR_FAIL:
1149
              # The node header is only shown once, if there are
1150
              # failing hooks on that node
1151
              if show_node_header:
1152
                feedback_fn("  Node %s:" % node_name)
1153
                show_node_header = False
1154
              feedback_fn("    ERROR: Script %s failed, output:" % script)
1155
              output = indent_re.sub('      ', output)
1156
              feedback_fn("%s" % output)
1157
              lu_result = 1
1158

    
1159
      return lu_result
1160

    
1161

    
1162
class LUVerifyDisks(NoHooksLU):
1163
  """Verifies the cluster disks status.
1164

1165
  """
1166
  _OP_REQP = []
1167
  REQ_BGL = False
1168

    
1169
  def ExpandNames(self):
1170
    self.needed_locks = {
1171
      locking.LEVEL_NODE: locking.ALL_SET,
1172
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1173
    }
1174
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
1175

    
1176
  def CheckPrereq(self):
1177
    """Check prerequisites.
1178

1179
    This has no prerequisites.
1180

1181
    """
1182
    pass
1183

    
1184
  def Exec(self, feedback_fn):
1185
    """Verify integrity of cluster disks.
1186

1187
    """
1188
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
1189

    
1190
    vg_name = self.cfg.GetVGName()
1191
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1192
    instances = [self.cfg.GetInstanceInfo(name)
1193
                 for name in self.cfg.GetInstanceList()]
1194

    
1195
    nv_dict = {}
1196
    for inst in instances:
1197
      inst_lvs = {}
1198
      if (not inst.admin_up or
1199
          inst.disk_template not in constants.DTS_NET_MIRROR):
1200
        continue
1201
      inst.MapLVsByNode(inst_lvs)
1202
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1203
      for node, vol_list in inst_lvs.iteritems():
1204
        for vol in vol_list:
1205
          nv_dict[(node, vol)] = inst
1206

    
1207
    if not nv_dict:
1208
      return result
1209

    
1210
    node_lvs = self.rpc.call_volume_list(nodes, vg_name)
1211

    
1212
    to_act = set()
1213
    for node in nodes:
1214
      # node_volume
1215
      lvs = node_lvs[node]
1216
      if lvs.failed:
1217
        if not lvs.offline:
1218
          self.LogWarning("Connection to node %s failed: %s" %
1219
                          (node, lvs.data))
1220
        continue
1221
      lvs = lvs.data
1222
      if isinstance(lvs, basestring):
1223
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
1224
        res_nlvm[node] = lvs
1225
      elif not isinstance(lvs, dict):
1226
        logging.warning("Connection to node %s failed or invalid data"
1227
                        " returned", node)
1228
        res_nodes.append(node)
1229
        continue
1230

    
1231
      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
1232
        inst = nv_dict.pop((node, lv_name), None)
1233
        if (not lv_online and inst is not None
1234
            and inst.name not in res_instances):
1235
          res_instances.append(inst.name)
1236

    
1237
    # any leftover items in nv_dict are missing LVs, let's arrange the
1238
    # data better
1239
    for key, inst in nv_dict.iteritems():
1240
      if inst.name not in res_missing:
1241
        res_missing[inst.name] = []
1242
      res_missing[inst.name].append(key)
1243

    
1244
    return result
1245

    
1246

    
1247
class LURenameCluster(LogicalUnit):
1248
  """Rename the cluster.
1249

1250
  """
1251
  HPATH = "cluster-rename"
1252
  HTYPE = constants.HTYPE_CLUSTER
1253
  _OP_REQP = ["name"]
1254

    
1255
  def BuildHooksEnv(self):
1256
    """Build hooks env.
1257

1258
    """
1259
    env = {
1260
      "OP_TARGET": self.cfg.GetClusterName(),
1261
      "NEW_NAME": self.op.name,
1262
      }
1263
    mn = self.cfg.GetMasterNode()
1264
    return env, [mn], [mn]
1265

    
1266
  def CheckPrereq(self):
1267
    """Verify that the passed name is a valid one.
1268

1269
    """
1270
    hostname = utils.HostInfo(self.op.name)
1271

    
1272
    new_name = hostname.name
1273
    self.ip = new_ip = hostname.ip
1274
    old_name = self.cfg.GetClusterName()
1275
    old_ip = self.cfg.GetMasterIP()
1276
    if new_name == old_name and new_ip == old_ip:
1277
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1278
                                 " cluster has changed")
1279
    if new_ip != old_ip:
1280
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1281
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1282
                                   " reachable on the network. Aborting." %
1283
                                   new_ip)
1284

    
1285
    self.op.name = new_name
1286

    
1287
  def Exec(self, feedback_fn):
1288
    """Rename the cluster.
1289

1290
    """
1291
    clustername = self.op.name
1292
    ip = self.ip
1293

    
1294
    # shutdown the master IP
1295
    master = self.cfg.GetMasterNode()
1296
    result = self.rpc.call_node_stop_master(master, False)
1297
    if result.failed or not result.data:
1298
      raise errors.OpExecError("Could not disable the master role")
1299

    
1300
    try:
1301
      cluster = self.cfg.GetClusterInfo()
1302
      cluster.cluster_name = clustername
1303
      cluster.master_ip = ip
1304
      self.cfg.Update(cluster)
1305

    
1306
      # update the known hosts file
1307
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
1308
      node_list = self.cfg.GetNodeList()
1309
      try:
1310
        node_list.remove(master)
1311
      except ValueError:
1312
        pass
1313
      result = self.rpc.call_upload_file(node_list,
1314
                                         constants.SSH_KNOWN_HOSTS_FILE)
1315
      for to_node, to_result in result.iteritems():
1316
        if to_result.failed or not to_result.data:
1317
          logging.error("Copy of file %s to node %s failed",
1318
                        constants.SSH_KNOWN_HOSTS_FILE, to_node)
1319

    
1320
    finally:
1321
      result = self.rpc.call_node_start_master(master, False)
1322
      if result.failed or not result.data:
1323
        self.LogWarning("Could not re-enable the master role on"
1324
                        " the master, please restart manually.")
1325

    
1326

    
1327
def _RecursiveCheckIfLVMBased(disk):
1328
  """Check if the given disk or its children are lvm-based.
1329

1330
  @type disk: L{objects.Disk}
1331
  @param disk: the disk to check
1332
  @rtype: booleean
1333
  @return: boolean indicating whether a LD_LV dev_type was found or not
1334

1335
  """
1336
  if disk.children:
1337
    for chdisk in disk.children:
1338
      if _RecursiveCheckIfLVMBased(chdisk):
1339
        return True
1340
  return disk.dev_type == constants.LD_LV
1341

    
1342

    
1343
class LUSetClusterParams(LogicalUnit):
1344
  """Change the parameters of the cluster.
1345

1346
  """
1347
  HPATH = "cluster-modify"
1348
  HTYPE = constants.HTYPE_CLUSTER
1349
  _OP_REQP = []
1350
  REQ_BGL = False
1351

    
1352
  def CheckParameters(self):
1353
    """Check parameters
1354

1355
    """
1356
    if not hasattr(self.op, "candidate_pool_size"):
1357
      self.op.candidate_pool_size = None
1358
    if self.op.candidate_pool_size is not None:
1359
      try:
1360
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
1361
      except ValueError, err:
1362
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
1363
                                   str(err))
1364
      if self.op.candidate_pool_size < 1:
1365
        raise errors.OpPrereqError("At least one master candidate needed")
1366

    
1367
  def ExpandNames(self):
1368
    # FIXME: in the future maybe other cluster params won't require checking on
1369
    # all nodes to be modified.
1370
    self.needed_locks = {
1371
      locking.LEVEL_NODE: locking.ALL_SET,
1372
    }
1373
    self.share_locks[locking.LEVEL_NODE] = 1
1374

    
1375
  def BuildHooksEnv(self):
1376
    """Build hooks env.
1377

1378
    """
1379
    env = {
1380
      "OP_TARGET": self.cfg.GetClusterName(),
1381
      "NEW_VG_NAME": self.op.vg_name,
1382
      }
1383
    mn = self.cfg.GetMasterNode()
1384
    return env, [mn], [mn]
1385

    
1386
  def CheckPrereq(self):
1387
    """Check prerequisites.
1388

1389
    This checks whether the given params don't conflict and
1390
    if the given volume group is valid.
1391

1392
    """
1393
    # FIXME: This only works because there is only one parameter that can be
1394
    # changed or removed.
1395
    if self.op.vg_name is not None and not self.op.vg_name:
1396
      instances = self.cfg.GetAllInstancesInfo().values()
1397
      for inst in instances:
1398
        for disk in inst.disks:
1399
          if _RecursiveCheckIfLVMBased(disk):
1400
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1401
                                       " lvm-based instances exist")
1402

    
1403
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1404

    
1405
    # if vg_name not None, checks given volume group on all nodes
1406
    if self.op.vg_name:
1407
      vglist = self.rpc.call_vg_list(node_list)
1408
      for node in node_list:
1409
        if vglist[node].failed:
1410
          # ignoring down node
1411
          self.LogWarning("Node %s unreachable/error, ignoring" % node)
1412
          continue
1413
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
1414
                                              self.op.vg_name,
1415
                                              constants.MIN_VG_SIZE)
1416
        if vgstatus:
1417
          raise errors.OpPrereqError("Error on node '%s': %s" %
1418
                                     (node, vgstatus))
1419

    
1420
    self.cluster = cluster = self.cfg.GetClusterInfo()
1421
    # validate beparams changes
1422
    if self.op.beparams:
1423
      utils.CheckBEParams(self.op.beparams)
1424
      self.new_beparams = cluster.FillDict(
1425
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)
1426

    
1427
    # hypervisor list/parameters
1428
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
1429
    if self.op.hvparams:
1430
      if not isinstance(self.op.hvparams, dict):
1431
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
1432
      for hv_name, hv_dict in self.op.hvparams.items():
1433
        if hv_name not in self.new_hvparams:
1434
          self.new_hvparams[hv_name] = hv_dict
1435
        else:
1436
          self.new_hvparams[hv_name].update(hv_dict)
1437

    
1438
    if self.op.enabled_hypervisors is not None:
1439
      self.hv_list = self.op.enabled_hypervisors
1440
    else:
1441
      self.hv_list = cluster.enabled_hypervisors
1442

    
1443
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
1444
      # either the enabled list has changed, or the parameters have, validate
1445
      for hv_name, hv_params in self.new_hvparams.items():
1446
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
1447
            (self.op.enabled_hypervisors and
1448
             hv_name in self.op.enabled_hypervisors)):
1449
          # either this is a new hypervisor, or its parameters have changed
1450
          hv_class = hypervisor.GetHypervisor(hv_name)
1451
          hv_class.CheckParameterSyntax(hv_params)
1452
          _CheckHVParams(self, node_list, hv_name, hv_params)
1453

    
1454
  def Exec(self, feedback_fn):
1455
    """Change the parameters of the cluster.
1456

1457
    """
1458
    if self.op.vg_name is not None:
1459
      if self.op.vg_name != self.cfg.GetVGName():
1460
        self.cfg.SetVGName(self.op.vg_name)
1461
      else:
1462
        feedback_fn("Cluster LVM configuration already in desired"
1463
                    " state, not changing")
1464
    if self.op.hvparams:
1465
      self.cluster.hvparams = self.new_hvparams
1466
    if self.op.enabled_hypervisors is not None:
1467
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1468
    if self.op.beparams:
1469
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
1470
    if self.op.candidate_pool_size is not None:
1471
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
1472

    
1473
    self.cfg.Update(self.cluster)
1474

    
1475
    # we want to update nodes after the cluster so that if any errors
1476
    # happen, we have recorded and saved the cluster info
1477
    if self.op.candidate_pool_size is not None:
1478
      _AdjustCandidatePool(self)


class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo())


def _WaitForSync(lu, instance, oneshot=False, unlock=False):
1509
  """Sleep and poll for an instance's disk to sync.
1510

1511
  """
1512
  if not instance.disks:
1513
    return True
1514

    
1515
  if not oneshot:
1516
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1517

    
1518
  node = instance.primary_node
1519

    
1520
  for dev in instance.disks:
1521
    lu.cfg.SetDiskID(dev, node)
1522

    
1523
  retries = 0
1524
  while True:
1525
    max_time = 0
1526
    done = True
1527
    cumul_degraded = False
1528
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1529
    if rstats.failed or not rstats.data:
1530
      lu.LogWarning("Can't get any data from node %s", node)
1531
      retries += 1
1532
      if retries >= 10:
1533
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1534
                                 " aborting." % node)
1535
      time.sleep(6)
1536
      continue
1537
    rstats = rstats.data
1538
    retries = 0
1539
    for i, mstat in enumerate(rstats):
1540
      if mstat is None:
1541
        lu.LogWarning("Can't compute data for node %s/%s",
1542
                           node, instance.disks[i].iv_name)
1543
        continue
1544
      # we ignore the ldisk parameter
1545
      perc_done, est_time, is_degraded, _ = mstat
1546
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1547
      if perc_done is not None:
1548
        done = False
1549
        if est_time is not None:
1550
          rem_time = "%d estimated seconds remaining" % est_time
1551
          max_time = est_time
1552
        else:
1553
          rem_time = "no time estimate"
1554
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
1555
                        (instance.disks[i].iv_name, perc_done, rem_time))
1556
    if done or oneshot:
1557
      break
1558

    
1559
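    # wait before polling again: at most a minute, or less if the last
    # reported sync time estimate is shorter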
    time.sleep(min(60, max_time))
1560

    
1561
  if done:
1562
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1563
  return not cumul_degraded
1564

    
1565

    
1566
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1567
  """Check that mirrors are not degraded.
1568

1569
  The ldisk parameter, if True, will change the test from the
1570
  is_degraded attribute (which represents overall non-ok status for
1571
  the device(s)) to the ldisk (representing the local storage status).
1572

1573
  """
1574
  lu.cfg.SetDiskID(dev, node)
1575
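  # index into the status tuple returned by call_blockdev_find: 5 is the
  # overall is_degraded flag, 6 the local disk (ldisk) status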
  if ldisk:
1576
    idx = 6
1577
  else:
1578
    idx = 5
1579

    
1580
  result = True
1581
  if on_primary or dev.AssembleOnSecondary():
1582
    rstats = lu.rpc.call_blockdev_find(node, dev)
1583
    if rstats.failed or not rstats.data:
1584
      logging.warning("Node %s: disk degraded, not found or node down", node)
1585
      result = False
1586
    else:
1587
      result = result and (not rstats.data[idx])
1588
  if dev.children:
1589
    for child in dev.children:
1590
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1591

    
1592
  return result
1593

    
1594

    
1595
class LUDiagnoseOS(NoHooksLU):
1596
  """Logical unit for OS diagnose/query.
1597

1598
  """
1599
  _OP_REQP = ["output_fields", "names"]
1600
  REQ_BGL = False
1601
  _FIELDS_STATIC = utils.FieldSet()
1602
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")
1603

    
1604
  def ExpandNames(self):
1605
    if self.op.names:
1606
      raise errors.OpPrereqError("Selective OS query not supported")
1607

    
1608
    _CheckOutputFields(static=self._FIELDS_STATIC,
1609
                       dynamic=self._FIELDS_DYNAMIC,
1610
                       selected=self.op.output_fields)
1611

    
1612
    # Lock all nodes, in shared mode
1613
    self.needed_locks = {}
1614
    self.share_locks[locking.LEVEL_NODE] = 1
1615
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1616

    
1617
  def CheckPrereq(self):
1618
    """Check prerequisites.
1619

1620
    """
1621

    
1622
  @staticmethod
1623
  def _DiagnoseByOS(node_list, rlist):
1624
    """Remaps a per-node return list into an a per-os per-node dictionary
1625

1626
    @param node_list: a list with the names of all nodes
1627
    @param rlist: a map with node names as keys and OS objects as values
1628

1629
    @rtype: dict
1630
    @returns: a dictionary with osnames as keys and as value another map, with
1631
        nodes as keys and list of OS objects as values, eg::
1632

1633
          {"debian-etch": {"node1": [<object>,...],
1634
                           "node2": [<object>,]}
1635
          }
1636

1637
    """
1638
    all_os = {}
1639
    for node_name, nr in rlist.iteritems():
1640
      if nr.failed or not nr.data:
1641
        continue
1642
      for os_obj in nr.data:
1643
        if os_obj.name not in all_os:
1644
          # build a list of nodes for this os containing empty lists
1645
          # for each node in node_list
1646
          all_os[os_obj.name] = {}
1647
          for nname in node_list:
1648
            all_os[os_obj.name][nname] = []
1649
        all_os[os_obj.name][node_name].append(os_obj)
1650
    return all_os
1651

    
1652
  def Exec(self, feedback_fn):
1653
    """Compute the list of OSes.
1654

1655
    """
1656
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1657
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()
1658
                   if node in node_list]
1659
    node_data = self.rpc.call_os_diagnose(valid_nodes)
1660
    if node_data == False:
1661
      raise errors.OpExecError("Can't gather the list of OSes")
1662
    pol = self._DiagnoseByOS(valid_nodes, node_data)
1663
    output = []
1664
    for os_name, os_data in pol.iteritems():
1665
      row = []
1666
      for field in self.op.output_fields:
1667
        if field == "name":
1668
          val = os_name
1669
        elif field == "valid":
1670
          val = utils.all([osl and osl[0] for osl in os_data.values()])
1671
        elif field == "node_status":
1672
          val = {}
1673
          for node_name, nos_list in os_data.iteritems():
1674
            val[node_name] = [(v.status, v.path) for v in nos_list]
1675
        else:
1676
          raise errors.ParameterError(field)
1677
        row.append(val)
1678
      output.append(row)
1679

    
1680
    return output
1681

    
1682

    
1683
class LURemoveNode(LogicalUnit):
1684
  """Logical unit for removing a node.
1685

1686
  """
1687
  HPATH = "node-remove"
1688
  HTYPE = constants.HTYPE_NODE
1689
  _OP_REQP = ["node_name"]
1690

    
1691
  def BuildHooksEnv(self):
1692
    """Build hooks env.
1693

1694
    This doesn't run on the target node in the pre phase as a failed
1695
    node would then be impossible to remove.
1696

1697
    """
1698
    env = {
1699
      "OP_TARGET": self.op.node_name,
1700
      "NODE_NAME": self.op.node_name,
1701
      }
1702
    all_nodes = self.cfg.GetNodeList()
1703
    all_nodes.remove(self.op.node_name)
1704
    return env, all_nodes, all_nodes
1705

    
1706
  def CheckPrereq(self):
1707
    """Check prerequisites.
1708

1709
    This checks:
1710
     - the node exists in the configuration
1711
     - it does not have primary or secondary instances
1712
     - it's not the master
1713

1714
    Any errors are signalled by raising errors.OpPrereqError.
1715

1716
    """
1717
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1718
    if node is None:
1719
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1720

    
1721
    instance_list = self.cfg.GetInstanceList()
1722

    
1723
    masternode = self.cfg.GetMasterNode()
1724
    if node.name == masternode:
1725
      raise errors.OpPrereqError("Node is the master node,"
1726
                                 " you need to failover first.")
1727

    
1728
    for instance_name in instance_list:
1729
      instance = self.cfg.GetInstanceInfo(instance_name)
1730
      if node.name in instance.all_nodes:
1731
        raise errors.OpPrereqError("Instance %s is still running on the node,"
1732
                                   " please remove first." % instance_name)
1733
    self.op.node_name = node.name
1734
    self.node = node
1735

    
1736
  def Exec(self, feedback_fn):
1737
    """Removes the node from the cluster.
1738

1739
    """
1740
    node = self.node
1741
    logging.info("Stopping the node daemon and removing configs from node %s",
1742
                 node.name)
1743

    
1744
    self.context.RemoveNode(node.name)
1745

    
1746
    self.rpc.call_node_leave_cluster(node.name)
1747

    
1748
    # Promote nodes to master candidate as needed
1749
    _AdjustCandidatePool(self)
1750

    
1751

    
1752
class LUQueryNodes(NoHooksLU):
1753
  """Logical unit for querying nodes.
1754

1755
  """
1756
  _OP_REQP = ["output_fields", "names", "use_locking"]
1757
  REQ_BGL = False
1758
  _FIELDS_DYNAMIC = utils.FieldSet(
1759
    "dtotal", "dfree",
1760
    "mtotal", "mnode", "mfree",
1761
    "bootid",
1762
    "ctotal",
1763
    )
1764

    
1765
  _FIELDS_STATIC = utils.FieldSet(
1766
    "name", "pinst_cnt", "sinst_cnt",
1767
    "pinst_list", "sinst_list",
1768
    "pip", "sip", "tags",
1769
    "serial_no",
1770
    "master_candidate",
1771
    "master",
1772
    "offline",
1773
    )
1774

    
1775
  def ExpandNames(self):
1776
    _CheckOutputFields(static=self._FIELDS_STATIC,
1777
                       dynamic=self._FIELDS_DYNAMIC,
1778
                       selected=self.op.output_fields)
1779

    
1780
    self.needed_locks = {}
1781
    self.share_locks[locking.LEVEL_NODE] = 1
1782

    
1783
    if self.op.names:
1784
      self.wanted = _GetWantedNodes(self, self.op.names)
1785
    else:
1786
      self.wanted = locking.ALL_SET
1787

    
1788
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
1789
    self.do_locking = self.do_node_query and self.op.use_locking
1790
    if self.do_locking:
1791
      # if we don't request only static fields, we need to lock the nodes
1792
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
1793

    
1794

    
1795
  def CheckPrereq(self):
1796
    """Check prerequisites.
1797

1798
    """
1799
    # The validation of the node list is done in _GetWantedNodes if the
    # list is non-empty; if it is empty, there is nothing to validate
1801
    pass
1802

    
1803
  def Exec(self, feedback_fn):
1804
    """Computes the list of nodes and their attributes.
1805

1806
    """
1807
    all_info = self.cfg.GetAllNodesInfo()
1808
    if self.do_locking:
1809
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
1810
    elif self.wanted != locking.ALL_SET:
1811
      nodenames = self.wanted
1812
      missing = set(nodenames).difference(all_info.keys())
1813
      if missing:
1814
        raise errors.OpExecError(
1815
          "Some nodes were removed before retrieving their data: %s" % missing)
1816
    else:
1817
      nodenames = all_info.keys()
1818

    
1819
    nodenames = utils.NiceSort(nodenames)
1820
    nodelist = [all_info[name] for name in nodenames]
1821

    
1822
    # begin data gathering
1823

    
1824
    if self.do_node_query:
1825
      live_data = {}
1826
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
1827
                                          self.cfg.GetHypervisorType())
1828
      for name in nodenames:
1829
        nodeinfo = node_data[name]
1830
        if not nodeinfo.failed and nodeinfo.data:
1831
          nodeinfo = nodeinfo.data
1832
          fn = utils.TryConvert
1833
          live_data[name] = {
1834
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
1835
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
1836
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
1837
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
1838
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
1839
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
1840
            "bootid": nodeinfo.get('bootid', None),
1841
            }
1842
        else:
1843
          live_data[name] = {}
1844
    else:
1845
      live_data = dict.fromkeys(nodenames, {})
1846

    
1847
    node_to_primary = dict([(name, set()) for name in nodenames])
1848
    node_to_secondary = dict([(name, set()) for name in nodenames])
1849

    
1850
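    # compute the instance->node mappings only if instance-related fields
    # were actually requested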
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
1851
                             "sinst_cnt", "sinst_list"))
1852
    if inst_fields & frozenset(self.op.output_fields):
1853
      instancelist = self.cfg.GetInstanceList()
1854

    
1855
      for instance_name in instancelist:
1856
        inst = self.cfg.GetInstanceInfo(instance_name)
1857
        if inst.primary_node in node_to_primary:
1858
          node_to_primary[inst.primary_node].add(inst.name)
1859
        for secnode in inst.secondary_nodes:
1860
          if secnode in node_to_secondary:
1861
            node_to_secondary[secnode].add(inst.name)
1862

    
1863
    master_node = self.cfg.GetMasterNode()
1864

    
1865
    # end data gathering
1866

    
1867
    output = []
1868
    for node in nodelist:
1869
      node_output = []
1870
      for field in self.op.output_fields:
1871
        if field == "name":
1872
          val = node.name
1873
        elif field == "pinst_list":
1874
          val = list(node_to_primary[node.name])
1875
        elif field == "sinst_list":
1876
          val = list(node_to_secondary[node.name])
1877
        elif field == "pinst_cnt":
1878
          val = len(node_to_primary[node.name])
1879
        elif field == "sinst_cnt":
1880
          val = len(node_to_secondary[node.name])
1881
        elif field == "pip":
1882
          val = node.primary_ip
1883
        elif field == "sip":
1884
          val = node.secondary_ip
1885
        elif field == "tags":
1886
          val = list(node.GetTags())
1887
        elif field == "serial_no":
1888
          val = node.serial_no
1889
        elif field == "master_candidate":
1890
          val = node.master_candidate
1891
        elif field == "master":
1892
          val = node.name == master_node
1893
        elif field == "offline":
1894
          val = node.offline
1895
        elif self._FIELDS_DYNAMIC.Matches(field):
1896
          val = live_data[node.name].get(field, None)
1897
        else:
1898
          raise errors.ParameterError(field)
1899
        node_output.append(val)
1900
      output.append(node_output)
1901

    
1902
    return output
1903

    
1904

    
1905
class LUQueryNodeVolumes(NoHooksLU):
1906
  """Logical unit for getting volumes on node(s).
1907

1908
  """
1909
  _OP_REQP = ["nodes", "output_fields"]
1910
  REQ_BGL = False
1911
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
1912
  _FIELDS_STATIC = utils.FieldSet("node")
1913

    
1914
  def ExpandNames(self):
1915
    _CheckOutputFields(static=self._FIELDS_STATIC,
1916
                       dynamic=self._FIELDS_DYNAMIC,
1917
                       selected=self.op.output_fields)
1918

    
1919
    self.needed_locks = {}
1920
    self.share_locks[locking.LEVEL_NODE] = 1
1921
    if not self.op.nodes:
1922
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1923
    else:
1924
      self.needed_locks[locking.LEVEL_NODE] = \
1925
        _GetWantedNodes(self, self.op.nodes)
1926

    
1927
  def CheckPrereq(self):
1928
    """Check prerequisites.
1929

1930
    This checks that the fields required are valid output fields.
1931

1932
    """
1933
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
1934

    
1935
  def Exec(self, feedback_fn):
1936
    """Computes the list of nodes and their attributes.
1937

1938
    """
1939
    nodenames = self.nodes
1940
    volumes = self.rpc.call_node_volumes(nodenames)
1941

    
1942
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
1943
             in self.cfg.GetInstanceList()]
1944

    
1945
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
1946

    
1947
    output = []
1948
    for node in nodenames:
1949
      if node not in volumes or volumes[node].failed or not volumes[node].data:
1950
        continue
1951

    
1952
      node_vols = volumes[node].data[:]
1953
      node_vols.sort(key=lambda vol: vol['dev'])
1954

    
1955
      for vol in node_vols:
1956
        node_output = []
1957
        for field in self.op.output_fields:
1958
          if field == "node":
1959
            val = node
1960
          elif field == "phys":
1961
            val = vol['dev']
1962
          elif field == "vg":
1963
            val = vol['vg']
1964
          elif field == "name":
1965
            val = vol['name']
1966
          elif field == "size":
1967
            val = int(float(vol['size']))
1968
          elif field == "instance":
1969
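            # find the instance owning this volume; the for/else leaves
            # '-' if no instance uses it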
            for inst in ilist:
1970
              if node not in lv_by_node[inst]:
1971
                continue
1972
              if vol['name'] in lv_by_node[inst][node]:
1973
                val = inst.name
1974
                break
1975
            else:
1976
              val = '-'
1977
          else:
1978
            raise errors.ParameterError(field)
1979
          node_output.append(str(val))
1980

    
1981
        output.append(node_output)
1982

    
1983
    return output
1984

    
1985

    
1986
class LUAddNode(LogicalUnit):
1987
  """Logical unit for adding node to the cluster.
1988

1989
  """
1990
  HPATH = "node-add"
1991
  HTYPE = constants.HTYPE_NODE
1992
  _OP_REQP = ["node_name"]
1993

    
1994
  def BuildHooksEnv(self):
1995
    """Build hooks env.
1996

1997
    This will run on all nodes before, and on all nodes + the new node after.
1998

1999
    """
2000
    env = {
2001
      "OP_TARGET": self.op.node_name,
2002
      "NODE_NAME": self.op.node_name,
2003
      "NODE_PIP": self.op.primary_ip,
2004
      "NODE_SIP": self.op.secondary_ip,
2005
      }
2006
    nodes_0 = self.cfg.GetNodeList()
2007
    nodes_1 = nodes_0 + [self.op.node_name, ]
2008
    return env, nodes_0, nodes_1
2009

    
2010
  def CheckPrereq(self):
2011
    """Check prerequisites.
2012

2013
    This checks:
2014
     - the new node is not already in the config
2015
     - it is resolvable
2016
     - its parameters (single/dual homed) matches the cluster
2017

2018
    Any errors are signalled by raising errors.OpPrereqError.
2019

2020
    """
2021
    node_name = self.op.node_name
2022
    cfg = self.cfg
2023

    
2024
    dns_data = utils.HostInfo(node_name)
2025

    
2026
    node = dns_data.name
2027
    primary_ip = self.op.primary_ip = dns_data.ip
2028
    secondary_ip = getattr(self.op, "secondary_ip", None)
2029
    if secondary_ip is None:
2030
      secondary_ip = primary_ip
2031
    if not utils.IsValidIP(secondary_ip):
2032
      raise errors.OpPrereqError("Invalid secondary IP given")
2033
    self.op.secondary_ip = secondary_ip
2034

    
2035
    node_list = cfg.GetNodeList()
2036
    if not self.op.readd and node in node_list:
2037
      raise errors.OpPrereqError("Node %s is already in the configuration" %
2038
                                 node)
2039
    elif self.op.readd and node not in node_list:
2040
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
2041

    
2042
    for existing_node_name in node_list:
2043
      existing_node = cfg.GetNodeInfo(existing_node_name)
2044

    
2045
      if self.op.readd and node == existing_node_name:
2046
        if (existing_node.primary_ip != primary_ip or
2047
            existing_node.secondary_ip != secondary_ip):
2048
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
2049
                                     " address configuration as before")
2050
        continue
2051

    
2052
      if (existing_node.primary_ip == primary_ip or
2053
          existing_node.secondary_ip == primary_ip or
2054
          existing_node.primary_ip == secondary_ip or
2055
          existing_node.secondary_ip == secondary_ip):
2056
        raise errors.OpPrereqError("New node ip address(es) conflict with"
2057
                                   " existing node %s" % existing_node.name)
2058

    
2059
    # check that the type of the node (single versus dual homed) is the
2060
    # same as for the master
2061
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
2062
    master_singlehomed = myself.secondary_ip == myself.primary_ip
2063
    newbie_singlehomed = secondary_ip == primary_ip
2064
    if master_singlehomed != newbie_singlehomed:
2065
      if master_singlehomed:
2066
        raise errors.OpPrereqError("The master has no private ip but the"
2067
                                   " new node has one")
2068
      else:
2069
        raise errors.OpPrereqError("The master has a private ip but the"
2070
                                   " new node doesn't have one")
2071

    
2072
    # checks reachability
2073
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
2074
      raise errors.OpPrereqError("Node not reachable by ping")
2075

    
2076
    if not newbie_singlehomed:
2077
      # check reachability from my secondary ip to newbie's secondary ip
2078
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
2079
                           source=myself.secondary_ip):
2080
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
2081
                                   " based ping to noded port")
2082

    
2083
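    # the new node joins as a master candidate only if the candidate
    # pool is not already full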
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2084
    mc_now, _ = self.cfg.GetMasterCandidateStats()
2085
    master_candidate = mc_now < cp_size
2086

    
2087
    self.new_node = objects.Node(name=node,
2088
                                 primary_ip=primary_ip,
2089
                                 secondary_ip=secondary_ip,
2090
                                 master_candidate=master_candidate,
2091
                                 offline=False)
2092

    
2093
  def Exec(self, feedback_fn):
2094
    """Adds the new node to the cluster.
2095

2096
    """
2097
    new_node = self.new_node
2098
    node = new_node.name
2099

    
2100
    # check connectivity
2101
    result = self.rpc.call_version([node])[node]
2102
    result.Raise()
2103
    if result.data:
2104
      if constants.PROTOCOL_VERSION == result.data:
2105
        logging.info("Communication to node %s fine, sw version %s match",
2106
                     node, result.data)
2107
      else:
2108
        raise errors.OpExecError("Version mismatch master version %s,"
2109
                                 " node version %s" %
2110
                                 (constants.PROTOCOL_VERSION, result.data))
2111
    else:
2112
      raise errors.OpExecError("Cannot get version from the new node")
2113

    
2114
    # setup ssh on node
2115
    logging.info("Copy ssh key to node %s", node)
2116
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
2117
    keyarray = []
2118
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
2119
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
2120
                priv_key, pub_key]
2121

    
2122
    for i in keyfiles:
2123
      f = open(i, 'r')
2124
      try:
2125
        keyarray.append(f.read())
2126
      finally:
2127
        f.close()
2128

    
2129
    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
2130
                                    keyarray[2],
2131
                                    keyarray[3], keyarray[4], keyarray[5])
2132

    
2133
    msg = result.RemoteFailMsg()
2134
    if msg:
2135
      raise errors.OpExecError("Cannot transfer ssh keys to the"
2136
                               " new node: %s" % msg)
2137

    
2138
    # Add node to our /etc/hosts, and add key to known_hosts
2139
    utils.AddHostToEtcHosts(new_node.name)
2140

    
2141
    if new_node.secondary_ip != new_node.primary_ip:
2142
      result = self.rpc.call_node_has_ip_address(new_node.name,
2143
                                                 new_node.secondary_ip)
2144
      if result.failed or not result.data:
2145
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
2146
                                 " you gave (%s). Please fix and re-run this"
2147
                                 " command." % new_node.secondary_ip)
2148

    
2149
    node_verify_list = [self.cfg.GetMasterNode()]
2150
    node_verify_param = {
2151
      'nodelist': [node],
2152
      # TODO: do a node-net-test as well?
2153
    }
2154

    
2155
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
2156
                                       self.cfg.GetClusterName())
2157
    for verifier in node_verify_list:
2158
      if result[verifier].failed or not result[verifier].data:
2159
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
2160
                                 " for remote verification" % verifier)
2161
      if result[verifier].data['nodelist']:
2162
        for failed in result[verifier].data['nodelist']:
2163
          feedback_fn("ssh/hostname verification failed %s -> %s" %
2164
                      (verifier, result[verifier].data['nodelist'][failed]))
2165
        raise errors.OpExecError("ssh/hostname verification failed.")
2166

    
2167
    # Distribute updated /etc/hosts and known_hosts to all nodes,
2168
    # including the node just added
2169
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
2170
    dist_nodes = self.cfg.GetNodeList()
2171
    if not self.op.readd:
2172
      dist_nodes.append(node)
2173
    if myself.name in dist_nodes:
2174
      dist_nodes.remove(myself.name)
2175

    
2176
    logging.debug("Copying hosts and known_hosts to all nodes")
2177
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
2178
      result = self.rpc.call_upload_file(dist_nodes, fname)
2179
      for to_node, to_result in result.iteritems():
2180
        if to_result.failed or not to_result.data:
2181
          logging.error("Copy of file %s to node %s failed", fname, to_node)
2182

    
2183
    to_copy = []
2184
    enabled_hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
2185
    if constants.HTS_USE_VNC.intersection(enabled_hypervisors):
2186
      to_copy.append(constants.VNC_PASSWORD_FILE)
2187

    
2188
    for fname in to_copy:
2189
      result = self.rpc.call_upload_file([node], fname)
2190
      if result[node].failed or not result[node].data:
2191
        logging.error("Could not copy file %s to node %s", fname, node)
2192

    
2193
    if self.op.readd:
2194
      self.context.ReaddNode(new_node)
2195
    else:
2196
      self.context.AddNode(new_node)
2197

    
2198

    
2199
class LUSetNodeParams(LogicalUnit):
2200
  """Modifies the parameters of a node.
2201

2202
  """
2203
  HPATH = "node-modify"
2204
  HTYPE = constants.HTYPE_NODE
2205
  _OP_REQP = ["node_name"]
2206
  REQ_BGL = False
2207

    
2208
  def CheckArguments(self):
2209
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
2210
    if node_name is None:
2211
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2212
    self.op.node_name = node_name
2213
    _CheckBooleanOpField(self.op, 'master_candidate')
2214
    _CheckBooleanOpField(self.op, 'offline')
2215
    if self.op.master_candidate is None and self.op.offline is None:
2216
      raise errors.OpPrereqError("Please pass at least one modification")
2217
    if self.op.offline == True and self.op.master_candidate == True:
2218
      raise errors.OpPrereqError("Can't set the node into offline and"
2219
                                 " master_candidate at the same time")
2220

    
2221
  def ExpandNames(self):
2222
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
2223

    
2224
  def BuildHooksEnv(self):
2225
    """Build hooks env.
2226

2227
    This runs on the master node.
2228

2229
    """
2230
    env = {
2231
      "OP_TARGET": self.op.node_name,
2232
      "MASTER_CANDIDATE": str(self.op.master_candidate),
2233
      "OFFLINE": str(self.op.offline),
2234
      }
2235
    nl = [self.cfg.GetMasterNode(),
2236
          self.op.node_name]
2237
    return env, nl, nl
2238

    
2239
  def CheckPrereq(self):
2240
    """Check prerequisites.
2241

2242
    This checks the requested flags against the node's current state.
2243

2244
    """
2245
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
2246

    
2247
    if ((self.op.master_candidate == False or self.op.offline == True)
2248
        and node.master_candidate):
2249
      # we will demote the node from master_candidate
2250
      if self.op.node_name == self.cfg.GetMasterNode():
2251
        raise errors.OpPrereqError("The master node has to be a"
2252
                                   " master candidate and online")
2253
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2254
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
2255
      if num_candidates <= cp_size:
2256
        msg = ("Not enough master candidates (desired"
2257
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
2258
        if self.op.force:
2259
          self.LogWarning(msg)
2260
        else:
2261
          raise errors.OpPrereqError(msg)
2262

    
2263
    if (self.op.master_candidate == True and node.offline and
2264
        not self.op.offline == False):
2265
      raise errors.OpPrereqError("Can't set an offline node to"
2266
                                 " master_candidate")
2267

    
2268
    return
2269

    
2270
  def Exec(self, feedback_fn):
2271
    """Modifies a node.
2272

2273
    """
2274
    node = self.node
2275

    
2276
    result = []
2277

    
2278
    if self.op.offline is not None:
2279
      node.offline = self.op.offline
2280
      result.append(("offline", str(self.op.offline)))
2281
      if self.op.offline == True and node.master_candidate:
2282
        node.master_candidate = False
2283
        result.append(("master_candidate", "auto-demotion due to offline"))
2284

    
2285
    if self.op.master_candidate is not None:
2286
      node.master_candidate = self.op.master_candidate
2287
      result.append(("master_candidate", str(self.op.master_candidate)))
2288
      if self.op.master_candidate == False:
2289
        rrc = self.rpc.call_node_demote_from_mc(node.name)
2290
        if (rrc.failed or not isinstance(rrc.data, (tuple, list))
2291
            or len(rrc.data) != 2):
2292
          self.LogWarning("Node rpc error: %s" % rrc.error)
2293
        elif not rrc.data[0]:
2294
          self.LogWarning("Node failed to demote itself: %s" % rrc.data[1])
2295

    
2296
    # this will trigger configuration file update, if needed
2297
    self.cfg.Update(node)
2298
    # this will trigger job queue propagation or cleanup
2299
    if self.op.node_name != self.cfg.GetMasterNode():
2300
      self.context.ReaddNode(node)
2301

    
2302
    return result
2303

    
2304

    
2305
class LUQueryClusterInfo(NoHooksLU):
2306
  """Query cluster configuration.
2307

2308
  """
2309
  _OP_REQP = []
2310
  REQ_BGL = False
2311

    
2312
  def ExpandNames(self):
2313
    self.needed_locks = {}
2314

    
2315
  def CheckPrereq(self):
2316
    """No prerequsites needed for this LU.
2317

2318
    """
2319
    pass
2320

    
2321
  def Exec(self, feedback_fn):
2322
    """Return cluster config.
2323

2324
    """
2325
    cluster = self.cfg.GetClusterInfo()
2326
    result = {
2327
      "software_version": constants.RELEASE_VERSION,
2328
      "protocol_version": constants.PROTOCOL_VERSION,
2329
      "config_version": constants.CONFIG_VERSION,
2330
      "os_api_version": constants.OS_API_VERSION,
2331
      "export_version": constants.EXPORT_VERSION,
2332
      "architecture": (platform.architecture()[0], platform.machine()),
2333
      "name": cluster.cluster_name,
2334
      "master": cluster.master_node,
2335
      "default_hypervisor": cluster.default_hypervisor,
2336
      "enabled_hypervisors": cluster.enabled_hypervisors,
2337
      "hvparams": dict([(hypervisor, cluster.hvparams[hypervisor])
2338
                        for hypervisor in cluster.enabled_hypervisors]),
2339
      "beparams": cluster.beparams,
2340
      "candidate_pool_size": cluster.candidate_pool_size,
2341
      }
2342

    
2343
    return result
2344

    
2345

    
2346
class LUQueryConfigValues(NoHooksLU):
2347
  """Return configuration values.
2348

2349
  """
2350
  _OP_REQP = []
2351
  REQ_BGL = False
2352
  _FIELDS_DYNAMIC = utils.FieldSet()
2353
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")
2354

    
2355
  def ExpandNames(self):
2356
    self.needed_locks = {}
2357

    
2358
    _CheckOutputFields(static=self._FIELDS_STATIC,
2359
                       dynamic=self._FIELDS_DYNAMIC,
2360
                       selected=self.op.output_fields)
2361

    
2362
  def CheckPrereq(self):
2363
    """No prerequisites.
2364

2365
    """
2366
    pass
2367

    
2368
  def Exec(self, feedback_fn):
2369
    """Dump a representation of the cluster config to the standard output.
2370

2371
    """
2372
    values = []
2373
    for field in self.op.output_fields:
2374
      if field == "cluster_name":
2375
        entry = self.cfg.GetClusterName()
2376
      elif field == "master_node":
2377
        entry = self.cfg.GetMasterNode()
2378
      elif field == "drain_flag":
2379
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
2380
      else:
2381
        raise errors.ParameterError(field)
2382
      values.append(entry)
2383
    return values
2384

    
2385

    
2386
class LUActivateInstanceDisks(NoHooksLU):
2387
  """Bring up an instance's disks.
2388

2389
  """
2390
  _OP_REQP = ["instance_name"]
2391
  REQ_BGL = False
2392

    
2393
  def ExpandNames(self):
2394
    self._ExpandAndLockInstance()
2395
    self.needed_locks[locking.LEVEL_NODE] = []
2396
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2397

    
2398
  def DeclareLocks(self, level):
2399
    if level == locking.LEVEL_NODE:
2400
      self._LockInstancesNodes()
2401

    
2402
  def CheckPrereq(self):
2403
    """Check prerequisites.
2404

2405
    This checks that the instance is in the cluster.
2406

2407
    """
2408
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2409
    assert self.instance is not None, \
2410
      "Cannot retrieve locked instance %s" % self.op.instance_name
2411
    _CheckNodeOnline(self, self.instance.primary_node)
2412

    
2413
  def Exec(self, feedback_fn):
2414
    """Activate the disks.
2415

2416
    """
2417
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
2418
    if not disks_ok:
2419
      raise errors.OpExecError("Cannot activate block devices")
2420

    
2421
    return disks_info
2422

    
2423

    
2424
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
2425
  """Prepare the block devices for an instance.
2426

2427
  This sets up the block devices on all nodes.
2428

2429
  @type lu: L{LogicalUnit}
2430
  @param lu: the logical unit on whose behalf we execute
2431
  @type instance: L{objects.Instance}
2432
  @param instance: the instance for whose disks we assemble
2433
  @type ignore_secondaries: boolean
2434
  @param ignore_secondaries: if true, errors on secondary nodes
2435
      won't result in an error return from the function
2436
  @return: a pair of (disks_ok, device_info), where device_info is a list
      of (host, instance_visible_name, node_visible_name) tuples with the
      mapping from node devices to instance devices
2439

2440
  """
2441
  device_info = []
2442
  disks_ok = True
2443
  iname = instance.name
2444
  # With the two passes mechanism we try to reduce the window of
2445
  # opportunity for the race condition of switching DRBD to primary
2446
  # before handshaking occurred, but we do not eliminate it
2447

    
2448
  # The proper fix would be to wait (with some limits) until the
2449
  # connection has been made and drbd transitions from WFConnection
2450
  # into any other network-connected state (Connected, SyncTarget,
2451
  # SyncSource, etc.)
2452

    
2453
  # 1st pass, assemble on all nodes in secondary mode
2454
  for inst_disk in instance.disks:
2455
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2456
      lu.cfg.SetDiskID(node_disk, node)
2457
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
2458
      if result.failed or not result:
2459
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2460
                           " (is_primary=False, pass=1)",
2461
                           inst_disk.iv_name, node)
2462
        if not ignore_secondaries:
2463
          disks_ok = False
2464

    
2465
  # FIXME: race condition on drbd migration to primary
2466

    
2467
  # 2nd pass, do only the primary node
2468
  for inst_disk in instance.disks:
2469
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2470
      if node != instance.primary_node:
2471
        continue
2472
      lu.cfg.SetDiskID(node_disk, node)
2473
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
2474
      if result.failed or not result:
2475
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2476
                           " (is_primary=True, pass=2)",
2477
                           inst_disk.iv_name, node)
2478
        disks_ok = False
2479
    device_info.append((instance.primary_node, inst_disk.iv_name, result.data))
2480

    
2481
  # leave the disks configured for the primary node
2482
  # this is a workaround that would be fixed better by
2483
  # improving the logical/physical id handling
2484
  for disk in instance.disks:
2485
    lu.cfg.SetDiskID(disk, instance.primary_node)
2486

    
2487
  return disks_ok, device_info
2488

    
2489

    
2490
def _StartInstanceDisks(lu, instance, force):
2491
  """Start the disks of an instance.
2492

2493
  """
2494
  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
2495
                                           ignore_secondaries=force)
2496
  if not disks_ok:
2497
    _ShutdownInstanceDisks(lu, instance)
2498
    if force is not None and not force:
2499
      lu.proc.LogWarning("", hint="If the message above refers to a"
2500
                         " secondary node,"
2501
                         " you can retry the operation using '--force'.")
2502
    raise errors.OpExecError("Disk consistency error")
2503

    
2504

    
2505
class LUDeactivateInstanceDisks(NoHooksLU):
2506
  """Shutdown an instance's disks.
2507

2508
  """
2509
  _OP_REQP = ["instance_name"]
2510
  REQ_BGL = False
2511

    
2512
  def ExpandNames(self):
2513
    self._ExpandAndLockInstance()
2514
    self.needed_locks[locking.LEVEL_NODE] = []
2515
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2516

    
2517
  def DeclareLocks(self, level):
2518
    if level == locking.LEVEL_NODE:
2519
      self._LockInstancesNodes()
2520

    
2521
  def CheckPrereq(self):
2522
    """Check prerequisites.
2523

2524
    This checks that the instance is in the cluster.
2525

2526
    """
2527
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2528
    assert self.instance is not None, \
2529
      "Cannot retrieve locked instance %s" % self.op.instance_name
2530

    
2531
  def Exec(self, feedback_fn):
2532
    """Deactivate the disks
2533

2534
    """
2535
    instance = self.instance
2536
    _SafeShutdownInstanceDisks(self, instance)
2537

    
2538

    
2539
def _SafeShutdownInstanceDisks(lu, instance):
2540
  """Shutdown block devices of an instance.
2541

2542
  This function checks if an instance is running, before calling
2543
  _ShutdownInstanceDisks.
2544

2545
  """
2546
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
2547
                                      [instance.hypervisor])
2548
  ins_l = ins_l[instance.primary_node]
2549
  if ins_l.failed or not isinstance(ins_l.data, list):
2550
    raise errors.OpExecError("Can't contact node '%s'" %
2551
                             instance.primary_node)
2552

    
2553
  if instance.name in ins_l.data:
2554
    raise errors.OpExecError("Instance is running, can't shutdown"
2555
                             " block devices.")
2556

    
2557
  _ShutdownInstanceDisks(lu, instance)
2558

    
2559

    
2560
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
2561
  """Shutdown block devices of an instance.
2562

2563
  This does the shutdown on all nodes of the instance.
2564

2565
  If ignore_primary is true, errors on the primary node are
  ignored.
2567

2568
  """
2569
  result = True
2570
  for disk in instance.disks:
2571
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2572
      lu.cfg.SetDiskID(top_disk, node)
2573
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
2574
      if result.failed or not result.data:
2575
        logging.error("Could not shutdown block device %s on node %s",
2576
                      disk.iv_name, node)
2577
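        # a failed shutdown only counts as an error if it happened on a
        # secondary node, or on the primary with ignore_primary unset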
        if not ignore_primary or node != instance.primary_node:
2578
          result = False
2579
  return result
2580

    
2581

    
2582
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
2583
  """Checks if a node has enough free memory.
2584

2585
  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.
2589

2590
  @type lu: C{LogicalUnit}
2591
  @param lu: a logical unit from which we get configuration data
2592
  @type node: C{str}
2593
  @param node: the node to check
2594
  @type reason: C{str}
2595
  @param reason: string to use in the error message
2596
  @type requested: C{int}
2597
  @param requested: the amount of memory in MiB to check for
2598
  @type hypervisor_name: C{str}
2599
  @param hypervisor_name: the hypervisor to ask for memory stats
2600
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2601
      we cannot check the node
2602

2603
  """
2604
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
2605
  nodeinfo[node].Raise()
2606
  free_mem = nodeinfo[node].data.get('memory_free')
2607
  if not isinstance(free_mem, int):
2608
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2609
                             " was '%s'" % (node, free_mem))
2610
  if requested > free_mem:
2611
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2612
                             " needed %s MiB, available %s MiB" %
2613
                             (node, reason, requested, free_mem))
2614

    
2615

    
2616
class LUStartupInstance(LogicalUnit):
2617
  """Starts an instance.
2618

2619
  """
2620
  HPATH = "instance-start"
2621
  HTYPE = constants.HTYPE_INSTANCE
2622
  _OP_REQP = ["instance_name", "force"]
2623
  REQ_BGL = False
2624

    
2625
  def ExpandNames(self):
2626
    self._ExpandAndLockInstance()
2627

    
2628
  def BuildHooksEnv(self):
2629
    """Build hooks env.
2630

2631
    This runs on master, primary and secondary nodes of the instance.
2632

2633
    """
2634
    env = {
2635
      "FORCE": self.op.force,
2636
      }
2637
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2638
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2639
    return env, nl, nl
2640

    
2641
  def CheckPrereq(self):
2642
    """Check prerequisites.
2643

2644
    This checks that the instance is in the cluster.
2645

2646
    """
2647
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2648
    assert self.instance is not None, \
2649
      "Cannot retrieve locked instance %s" % self.op.instance_name
2650

    
2651
    _CheckNodeOnline(self, instance.primary_node)
2652

    
2653
    bep = self.cfg.GetClusterInfo().FillBE(instance)
2654
    # check bridges existence
2655
    _CheckInstanceBridgesExist(self, instance)
2656

    
2657
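    # ensure the primary node has enough free memory to start the
    # instance, according to its backend parameters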
    _CheckNodeFreeMemory(self, instance.primary_node,
2658
                         "starting instance %s" % instance.name,
2659
                         bep[constants.BE_MEMORY], instance.hypervisor)
2660

    
2661
  def Exec(self, feedback_fn):
2662
    """Start the instance.
2663

2664
    """
2665
    instance = self.instance
2666
    force = self.op.force
2667
    extra_args = getattr(self.op, "extra_args", "")
2668

    
2669
    self.cfg.MarkInstanceUp(instance.name)
2670

    
2671
    node_current = instance.primary_node
2672

    
2673
    _StartInstanceDisks(self, instance, force)
2674

    
2675
    result = self.rpc.call_instance_start(node_current, instance, extra_args)
2676
    msg = result.RemoteFailMsg()
2677
    if msg:
2678
      _ShutdownInstanceDisks(self, instance)
2679
      raise errors.OpExecError("Could not start instance: %s" % msg)
2680

    
2681

    
2682
class LURebootInstance(LogicalUnit):
2683
  """Reboot an instance.
2684

2685
  """
2686
  HPATH = "instance-reboot"
2687
  HTYPE = constants.HTYPE_INSTANCE
2688
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2689
  REQ_BGL = False
2690

    
2691
  def ExpandNames(self):
2692
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2693
                                   constants.INSTANCE_REBOOT_HARD,
2694
                                   constants.INSTANCE_REBOOT_FULL]:
2695
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2696
                                  (constants.INSTANCE_REBOOT_SOFT,
2697
                                   constants.INSTANCE_REBOOT_HARD,
2698
                                   constants.INSTANCE_REBOOT_FULL))
2699
    self._ExpandAndLockInstance()
2700

    
2701
  def BuildHooksEnv(self):
2702
    """Build hooks env.
2703

2704
    This runs on master, primary and secondary nodes of the instance.
2705

2706
    """
2707
    env = {
2708
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2709
      }
2710
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2711
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2712
    return env, nl, nl
2713

    
2714
  def CheckPrereq(self):
2715
    """Check prerequisites.
2716

2717
    This checks that the instance is in the cluster.
2718

2719
    """
2720
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2721
    assert self.instance is not None, \
2722
      "Cannot retrieve locked instance %s" % self.op.instance_name
2723

    
2724
    _CheckNodeOnline(self, instance.primary_node)
2725

    
2726
    # check bridges existence
2727
    _CheckInstanceBridgesExist(self, instance)
2728

    
2729
  def Exec(self, feedback_fn):
2730
    """Reboot the instance.
2731

2732
    """
2733
    instance = self.instance
2734
    ignore_secondaries = self.op.ignore_secondaries
2735
    reboot_type = self.op.reboot_type
2736
    extra_args = getattr(self.op, "extra_args", "")
2737

    
2738
    node_current = instance.primary_node
2739

    
2740
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2741
                       constants.INSTANCE_REBOOT_HARD]:
2742
      result = self.rpc.call_instance_reboot(node_current, instance,
2743
                                             reboot_type, extra_args)
2744
      if result.failed or not result.data:
2745
        raise errors.OpExecError("Could not reboot instance")
2746
    else:
2747
      if not self.rpc.call_instance_shutdown(node_current, instance):
2748
        raise errors.OpExecError("could not shutdown instance for full reboot")
2749
      _ShutdownInstanceDisks(self, instance)
2750
      _StartInstanceDisks(self, instance, ignore_secondaries)
2751
      result = self.rpc.call_instance_start(node_current, instance, extra_args)
2752
      msg = result.RemoteFailMsg()
2753
      if msg:
2754
        _ShutdownInstanceDisks(self, instance)
2755
        raise errors.OpExecError("Could not start instance for"
2756
                                 " full reboot: %s" % msg)
2757

    
2758
    self.cfg.MarkInstanceUp(instance.name)
2759

    
2760

    
2761
class LUShutdownInstance(LogicalUnit):
2762
  """Shutdown an instance.
2763

2764
  """
2765
  HPATH = "instance-stop"
2766
  HTYPE = constants.HTYPE_INSTANCE
2767
  _OP_REQP = ["instance_name"]
2768
  REQ_BGL = False
2769

    
2770
  def ExpandNames(self):
2771
    self._ExpandAndLockInstance()
2772

    
2773
  def BuildHooksEnv(self):
2774
    """Build hooks env.
2775

2776
    This runs on master, primary and secondary nodes of the instance.
2777

2778
    """
2779
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2780
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2781
    return env, nl, nl
2782

    
2783
  def CheckPrereq(self):
2784
    """Check prerequisites.
2785

2786
    This checks that the instance is in the cluster.
2787

2788
    """
2789
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2790
    assert self.instance is not None, \
2791
      "Cannot retrieve locked instance %s" % self.op.instance_name
2792
    _CheckNodeOnline(self, self.instance.primary_node)
2793

    
2794
  def Exec(self, feedback_fn):
2795
    """Shutdown the instance.
2796

2797
    """
2798
    instance = self.instance
2799
    node_current = instance.primary_node
2800
    self.cfg.MarkInstanceDown(instance.name)
2801
    result = self.rpc.call_instance_shutdown(node_current, instance)
2802
    if result.failed or not result.data:
2803
      self.proc.LogWarning("Could not shutdown instance")
2804

    
2805
    _ShutdownInstanceDisks(self, instance)
2806

    
2807

    
2808
class LUReinstallInstance(LogicalUnit):
2809
  """Reinstall an instance.
2810

2811
  """
2812
  HPATH = "instance-reinstall"
2813
  HTYPE = constants.HTYPE_INSTANCE
2814
  _OP_REQP = ["instance_name"]
2815
  REQ_BGL = False
2816

    
2817
  def ExpandNames(self):
2818
    self._ExpandAndLockInstance()
2819

    
2820
  def BuildHooksEnv(self):
2821
    """Build hooks env.
2822

2823
    This runs on master, primary and secondary nodes of the instance.
2824

2825
    """
2826
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2827
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2828
    return env, nl, nl
2829

    
2830
  def CheckPrereq(self):
2831
    """Check prerequisites.
2832

2833
    This checks that the instance is in the cluster and is not running.
2834

2835
    """
2836
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2837
    assert instance is not None, \
2838
      "Cannot retrieve locked instance %s" % self.op.instance_name
2839
    _CheckNodeOnline(self, instance.primary_node)
2840

    
2841
    if instance.disk_template == constants.DT_DISKLESS:
2842
      raise errors.OpPrereqError("Instance '%s' has no disks" %
2843
                                 self.op.instance_name)
2844
    if instance.admin_up:
2845
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2846
                                 self.op.instance_name)
2847
    remote_info = self.rpc.call_instance_info(instance.primary_node,
2848
                                              instance.name,
2849
                                              instance.hypervisor)
2850
    if remote_info.failed or remote_info.data:
2851
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2852
                                 (self.op.instance_name,
2853
                                  instance.primary_node))
2854

    
2855
    self.op.os_type = getattr(self.op, "os_type", None)
2856
    if self.op.os_type is not None:
2857
      # OS verification
2858
      pnode = self.cfg.GetNodeInfo(
2859
        self.cfg.ExpandNodeName(instance.primary_node))
2860
      if pnode is None:
2861
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
2862
                                   self.op.pnode)
2863
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
2864
      result.Raise()
2865
      if not isinstance(result.data, objects.OS):
2866
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2867
                                   " primary node"  % self.op.os_type)
2868

    
2869
    self.instance = instance
2870

    
2871
  def Exec(self, feedback_fn):
2872
    """Reinstall the instance.
2873

2874
    """
2875
    inst = self.instance
2876

    
2877
    if self.op.os_type is not None:
2878
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2879
      inst.os = self.op.os_type
2880
      self.cfg.Update(inst)
2881

    
2882
    _StartInstanceDisks(self, inst, None)
2883
    try:
2884
      feedback_fn("Running the instance OS create scripts...")
2885
      result = self.rpc.call_instance_os_add(inst.primary_node, inst)
2886
      msg = result.RemoteFailMsg()
2887
      if msg:
2888
        raise errors.OpExecError("Could not install OS for instance %s"
2889
                                 " on node %s: %s" %
2890
                                 (inst.name, inst.primary_node, msg))
2891
    finally:
2892
      _ShutdownInstanceDisks(self, inst)
2893

    
2894

    
2895
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result.data[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.RemoteFailMsg()
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)


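# Removal (LURemoveInstance below) proceeds strictly in the order
# shutdown -> _RemoveDisks -> cfg.RemoveInstance; with ignore_failures
# set, failures of the first two steps are downgraded to warnings so the
# configuration entry can still be dropped, and the instance lock is
# released via self.remove_locks once the config has been updated.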
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
    if result.failed or not result.data:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance")
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))

    logging.info("Removing block devices for instance %s", instance.name)

    if not _RemoveDisks(self, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", instance.name)

    self.cfg.RemoveInstance(instance.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name


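# Instance queries (LUQueryInstances below) distinguish two kinds of
# fields: static ones (_FIELDS_STATIC) are answered purely from the
# cluster configuration, while dynamic ones (_FIELDS_DYNAMIC: oper_state,
# oper_ram, status) need live data gathered from the primary nodes via
# call_all_instances_info.  Locks are only taken when at least one
# dynamic field is requested *and* the caller asked for use_locking;
# otherwise the query runs lock-free on possibly slightly stale data.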
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state", "admin_ram",
                                    "disk_template", "ip", "mac", "bridge",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    "(disk).(size)/([0-9]+)",
                                    "(disk).(sizes)",
                                    "(nic).(mac|ip|bridge)/([0-9]+)",
                                    "(nic).(macs|ips|bridges)",
                                    "(disk|nic).(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")


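  # The parenthesized entries in _FIELDS_STATIC above are regular
  # expressions: Matches() returns the match object and Exec() pulls out
  # its groups, so for example "disk.size/0" selects the size of the
  # first disk, "nic.mac/1" the MAC of the second NIC, and the plural
  # forms ("disk.sizes", "nic.macs", ...) return the whole list per
  # instance.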
  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of instances and their attributes.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # caller didn't specify instance names, so ordering is not important
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # caller did specify names, so we must keep the ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    off_nodes = []
    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.failed:
          bad_nodes.append(name)
        else:
          if result.data:
            live_data.update(result.data)
            # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "bridges":
              val = [nic.bridge for nic in instance.nics]
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "bridge":
                  val = instance.nics[nic_idx].bridge
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, "Unhandled variable parameter"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output


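# Failover (LUFailoverInstance below) and migration (LUMigrateInstance
# further down) cover the same "move to the secondary node" use case with
# different trade-offs: failover shuts the instance down on the primary
# and restarts it on the old secondary, so it works for any
# DTS_NET_MIRROR disk template and can even proceed with
# ignore_consistency, while migration keeps the instance running but is
# restricted to DRBD8 and relies on the hypervisor-level migration
# support checked in its CheckPrereq.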
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, bep[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    if result.failed or not result.data:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding"
                             " anyway. Please make sure node %s is down",
                             instance.name, source_node, source_node)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUMigrateInstance(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "live", "cleanup"]

  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "drbd8 disk template")

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    if result.failed or not result.data:
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    if not self.op.cleanup:
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
                                   msg)

    self.instance = instance

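  # The helpers below are the building blocks of the migration disk
  # state machine: _WaitUntilSync polls call_drbd_wait_sync until every
  # node reports the resync as finished, _EnsureSecondary closes the
  # devices on one node, and _GoStandalone/_GoReconnect drop and
  # re-establish the DRBD network connection, the latter either in
  # single-master or dual-master (multimaster) mode.  Mode changes always
  # pass through the standalone state first.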
  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        msg = nres.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
                                   (node, msg))
        node_done, node_percent = nres.data[1]
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
                               " error %s" % (node, msg))

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot disconnect disks node %s,"
                                 " error %s" % (node, msg))

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot change disks config on node %s,"
                                 " error: %s" % (node, msg))

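  # _ExecCleanup below is the recovery path (the opcode run with
  # cleanup=True after a migration attempt failed or was interrupted): it
  # first works out on which node the instance is actually running,
  # repairs primary_node in the configuration if the move already
  # happened, and then forces the disks back into plain single-master
  # mode.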
  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise()
      if not isinstance(result.data, list):
        raise errors.OpExecError("Can't contact node '%s'" % node)

    runningon_source = instance.name in ins_l[source_node].data
    runningon_target = instance.name in ins_l[target_node].data

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore errors here, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.LogWarning("Migration failed and I can't reconnect the"
                      " drives: error '%s'\n"
                      "Please look and recover the instance status" %
                      str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.RemoteFailMsg()
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s" %
                    (target_node, abort_msg))
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

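  # _ExecMigration below handles failures of the two critical RPCs
  # (call_accept_instance and call_instance_migrate) by calling
  # _AbortMigration, which re-uses call_finalize_migration with a False
  # flag so the target drops the half-started instance, and then
  # _RevertDiskStatus to put the disks back into single-master mode on
  # the source node.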
  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                  (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.data[1]

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.op.live)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s" % msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def Exec(self, feedback_fn):
    """Perform the migration.

    """
    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node
    self.target_node = self.instance.secondary_nodes[0]
    self.all_nodes = [self.source_node, self.target_node]
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }
    if self.op.cleanup:
      return self._ExecCleanup()
    else:
      return self._ExecMigration()


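# Block devices are created bottom-up: _CreateBlockDev recurses into the
# children of a device first and only then creates the device itself via
# _CreateSingleBlockDev.  The force_create flag is sticky: once any
# device in the tree reports CreateOnSecondary(), that device and
# everything below it is created on the given node as well.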
def _CreateBlockDev(lu, node, instance, device, force_create,
                    info, force_open):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device whose
      CreateOnSecondary() returns True
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.CreateBlockDevice} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  if device.CreateOnSecondary():
    force_create = True

  if device.children:
    for child in device.children:
      _CreateBlockDev(lu, node, instance, child, force_create,
                      info, force_open)

  if not force_create:
    return

  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)


def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created in advance.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.CreateBlockDevice} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  lu.cfg.SetDiskID(device, node)
  result = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, force_open, info)
  msg = result.RemoteFailMsg()
  if msg:
    raise errors.OpExecError("Can't create block device %s on"
                             " node %s for instance %s: %s" %
                             (device, node, instance.name, msg))
  if device.physical_id is None:
    device.physical_id = result.data[1]


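# Volume naming: _GenerateUniqueNames below turns a list of extensions
# (e.g. [".disk0", ".disk1"]) into "<unique-id><ext>" strings, and the
# DRBD template additionally derives a "_data" and a "_meta" LV name from
# each of those prefixes (see _GenerateDiskTemplate).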
def _GenerateUniqueNames(lu, exts):
  """Generate a suitable LV name.

  This will generate a logical volume name for the given instance.

  """
  results = []
  for val in exts:
    new_id = lu.cfg.GenerateUniqueID()
    results.append("%s%s" % (new_id, val))
  return results


def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  shared_secret = lu.cfg.GenerateDRBDSecret()
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                          logical_id=(primary, secondary, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name)
  return drbd_dev


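# _GenerateDiskTemplate below maps a disk template name to the list of
# objects.Disk trees that describe it.  As an illustration (values made
# up): disk_info = [{"size": 10240, "mode": "rw"}] with DT_PLAIN and
# base_index 0 yields a single LD_LV disk with iv_name "disk/0" and size
# 10240, while the same input with DT_DRBD8 yields one LD_DRBD8 device
# with a data LV and a size-128 metadata LV as children, consuming two
# DRBD minors (one per node) allocated via AllocateDRBDMinor.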
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % i
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vgname, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         idx)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks


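# The string returned by _GetInstanceInfoText below ("originstname+<name>")
# is computed by _CreateDisks as the `info` metadata for the disks it
# creates; per the docstrings of the _Create*BlockDev helpers above, that
# info string ends up on the created volumes as an LVM tag.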
def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name


def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @rtype: boolean
  @return: the success of the creation

  """
  info = _GetInstanceInfoText(instance)
  pnode = instance.primary_node

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    if result.failed or not result.data:
      raise errors.OpExecError("Could not connect to node '%s'" % pnode)

    if not result.data[0]:
      raise errors.OpExecError("Failed to create directory '%s'" %
                               file_storage_dir)