#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import time
import tempfile
import re
import platform
import logging
import copy
import random

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import serializer
from ganeti import ssconf


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these checks separately is better because:

      - ExpandNames is left as a purely lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods need no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not be prefixed with 'GANETI_' as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instances' nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


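# Editorial illustration, not part of the original module: a minimal
# concurrent LU sketch showing how the pieces above fit together --
# ExpandNames declares the instance lock and defers node locks,
# DeclareLocks resolves them via _LockInstancesNodes, and CheckPrereq
# fetches the locked objects.  The opcode and its instance_name field
# are hypothetical.
class LUExampleInstanceNoop(LogicalUnit):
  """Example LU which locks one instance plus its nodes and does nothing.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed in DeclareLocks, once the instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    feedback_fn("Instance %s looks healthy" % self.instance.name)

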
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names; must be a non-empty list
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.ProgrammerError: if the nodes parameter is of a wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)
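
# Editorial illustration (opcode field hypothetical): an LU that operates
# on user-supplied node names would typically expand them while declaring
# locks, e.g. in ExpandNames:
#
#   self.needed_locks[locking.LEVEL_NODE] = \
#     _GetWantedNodes(self, self.op.nodes)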


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is of a wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set
  @type selected: list
  @param selected: the list of output fields selected by the user

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta))


def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)))
  setattr(op, name, val)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node)


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, bridge, mac) representing
      the NICs the instance has
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge, mac) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env
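
# Editorial illustration (all values hypothetical): for a running
# single-NIC instance the function above would return something like
#
#   {
#     "OP_TARGET": "inst1.example.com",
#     "INSTANCE_NAME": "inst1.example.com",
#     "INSTANCE_PRIMARY": "node1.example.com",
#     "INSTANCE_SECONDARIES": "node2.example.com",
#     "INSTANCE_OS_TYPE": "debian-etch",
#     "INSTANCE_STATUS": "up",
#     "INSTANCE_MEMORY": 128,
#     "INSTANCE_VCPUS": 1,
#     "INSTANCE_NIC0_IP": "",
#     "INSTANCE_NIC0_BRIDGE": "xen-br0",
#     "INSTANCE_NIC0_HWADDR": "aa:00:00:36:33:22",
#     "INSTANCE_NIC_COUNT": 1,
#   }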


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


def _AdjustCandidatePool(lu):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool()
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               ", ".join(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _CheckInstanceBridgesExist(lu, instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
  result.Raise()
  if not result.data:
    raise errors.OpPrereqError("One or more target bridges %s do not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise()
    if not result.data:
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files,
                  drbd_map):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have
    @param drbd_map: the used drbd minors for this node, in
        the form of minor: (instance, must_exist), which correspond to
        instances and their running status

    """
    node = nodeinfo.name

    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = node_result.get('version', None)
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
            len(remote_version) == 2):
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version[0]:
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
                  " node %s %s" % (local_version, node, remote_version[0]))
      return True

    # node seems compatible, we can actually try to look into its results

    bad = False

    # full package version
    if constants.RELEASE_VERSION != remote_version[1]:
      feedback_fn("  - WARNING: software version mismatch: master %s,"
                  " node %s %s" %
                  (constants.RELEASE_VERSION, node, remote_version[1]))

    # checks vg existence and size > 20G

    vglist = node_result.get(constants.NV_VGLIST, None)
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum

    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      for file_name in file_list:
        node_is_mc = nodeinfo.master_candidate
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
          else:
            # not candidate and this is not a must-have file
            bad = True
            feedback_fn("  - ERROR: non master-candidate has old/wrong file"
                        " '%s'" % file_name)
        else:
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)

    # checks ssh to any

    if constants.NV_NODELIST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result[constants.NV_NODELIST]:
        bad = True
        for node in node_result[constants.NV_NODELIST]:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODELIST][node]))

    if constants.NV_NODENETTEST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result[constants.NV_NODENETTEST]:
        bad = True
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODENETTEST][node]))

    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))

    # check used drbd list
    used_minors = node_result.get(constants.NV_DRBDLIST, [])
    for minor, (iname, must_exist) in drbd_map.items():
      if minor not in used_minors and must_exist:
        feedback_fn("  - ERROR: drbd minor %d of instance %s is not active" %
                    (minor, iname))
        bad = True
    for minor in used_minors:
      if minor not in drbd_map:
        feedback_fn("  - ERROR: unallocated drbd minor %d is in use" % minor)
        bad = True

    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn, n_offline):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      if node in n_offline:
        # ignore missing volumes on offline nodes
        continue
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if instanceconfig.admin_up:
      if ((node_current not in node_instance or
          instance not in node_instance[node_current]) and
          node_current not in n_offline):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if node != node_current:
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all the instances it would have to take over,
      # should a single other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad
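
  # Worked example (numbers hypothetical): suppose node2 is secondary for
  # two auto-balanced instances whose primary is node1, with BE_MEMORY of
  # 2048 and 1024 MiB.  Should node1 fail, node2 must absorb 3072 MiB; if
  # node2's 'mfree' is only 2560 MiB, the loop above reports an N+1 error
  # for the (node2, node1) pair.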

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase and their failure makes
    the output be logged in the verify output and the verification to fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
                        for iname in instancelist)
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    n_offline = [] # List of offline nodes
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    master_files = [constants.CLUSTER_CONF_FILE]

    file_names = ssconf.SimpleStore().GetFileList()
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.RAPI_CERT_FILE)
    file_names.extend(master_files)

    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    node_verify_param = {
      constants.NV_FILELIST: file_names,
      constants.NV_NODELIST: [node.name for node in nodeinfo
                              if not node.offline],
      constants.NV_HYPERVISOR: hypervisors,
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
                                  node.secondary_ip) for node in nodeinfo
                                 if not node.offline],
      constants.NV_LVLIST: vg_name,
      constants.NV_INSTANCELIST: hypervisors,
      constants.NV_VGLIST: None,
      constants.NV_VERSION: None,
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
      constants.NV_DRBDLIST: None,
      }
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())

    cluster = self.cfg.GetClusterInfo()
    master_node = self.cfg.GetMasterNode()
    all_drbd_map = self.cfg.ComputeDRBDMap()

    for node_i in nodeinfo:
      node = node_i.name
      nresult = all_nvinfo[node].data

      if node_i.offline:
        feedback_fn("* Skipping offline node %s" % (node,))
        n_offline.append(node)
        continue

      if node == master_node:
        ntype = "master"
      elif node_i.master_candidate:
        ntype = "master candidate"
      else:
        ntype = "regular"
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))

      if all_nvinfo[node].failed or not isinstance(nresult, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_drbd = {}
      for minor, instance in all_drbd_map[node].items():
        instance = instanceinfo[instance]
        node_drbd[minor] = (instance.name, instance.admin_up)
      result = self._VerifyNode(node_i, file_names, local_checksums,
                                nresult, feedback_fn, master_files,
                                node_drbd)
      bad = bad or result

      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
      if isinstance(lvdata, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, lvdata.encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(lvdata, dict):
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = lvdata

      # node_instance
      idata = nresult.get(constants.NV_INSTANCELIST, None)
      if not isinstance(idata, list):
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
                    (node,))
        bad = True
        continue

      node_instance[node] = idata

      # node_info
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nresult[constants.NV_VGLIST][vg_name]),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  This is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = instanceinfo[instance]
      result = self._VerifyInstance(instance, inst_config, node_volume,
                                    node_instance, feedback_fn, n_offline)
      bad = bad or result
      inst_nodes_offline = []

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      elif pnode not in n_offline:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      if pnode in n_offline:
        inst_nodes_offline.append(pnode)

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        elif snode not in n_offline:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))
          bad = True
        if snode in n_offline:
          inst_nodes_offline.append(snode)

      if inst_nodes_offline:
        # warn that the instance lives on offline nodes, and set bad=True
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
                    ", ".join(inst_nodes_offline))
        bad = True

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    if n_offline:
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))

    return not bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result.

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res.failed or res.data is False or not isinstance(res.data, list):
            if res.offline:
              # no need to warn or set fail return value
              continue
            feedback_fn("    Communication failure in hooks execution")
            lu_result = 1
            continue
          for script, hkr, output in res.data:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    to_act = set()
    for node in nodes:
      # node_volume
      lvs = node_lvs[node]
      if lvs.failed:
        if not lvs.offline:
          self.LogWarning("Connection to node %s failed: %s" %
                          (node, lvs.data))
        continue
      lvs = lvs.data
      if isinstance(lvs, basestring):
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
        res_nlvm[node] = lvs
      elif not isinstance(lvs, dict):
        logging.warning("Connection to node %s failed or invalid data"
                        " returned", node)
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
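
  # Editorial note: the 4-tuple assembled above is (nodes that could not be
  # queried, {node: LVM error string}, [instances with offline LVs],
  # {instance: [(node, volume), ...] for missing LVs}); the caller is
  # expected to use it to decide which instance disks need reactivating.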


class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    if result.failed or not result.data:
      raise errors.OpExecError("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed",
                        constants.SSH_KNOWN_HOSTS_FILE, to_node)

    finally:
      result = self.rpc.call_node_start_master(master, False)
      if result.failed or not result.data:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually.")


def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether an LD_LV dev_type was found or not

  """
  if disk.children:
    for chdisk in disk.children:
      if _RecursiveCheckIfLVMBased(chdisk):
        return True
  return disk.dev_type == constants.LD_LV
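
# Editorial illustration (disk objects hypothetical): a DRBD device backed
# by logical volumes counts as lvm-based, a file-backed disk does not:
#
#   lv = objects.Disk(dev_type=constants.LD_LV, size=1024)
#   drbd = objects.Disk(dev_type=constants.LD_DRBD8, children=[lv, lv])
#   _RecursiveCheckIfLVMBased(drbd)  # True, via the LV children
#   _RecursiveCheckIfLVMBased(objects.Disk(dev_type=constants.LD_FILE))
#   # False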
1341

    
1342

    
1343
class LUSetClusterParams(LogicalUnit):
1344
  """Change the parameters of the cluster.
1345

1346
  """
1347
  HPATH = "cluster-modify"
1348
  HTYPE = constants.HTYPE_CLUSTER
1349
  _OP_REQP = []
1350
  REQ_BGL = False
1351

    
1352
  def CheckParameters(self):
1353
    """Check parameters
1354

1355
    """
1356
    if not hasattr(self.op, "candidate_pool_size"):
1357
      self.op.candidate_pool_size = None
1358
    if self.op.candidate_pool_size is not None:
1359
      try:
1360
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
1361
      except ValueError, err:
1362
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
1363
                                   str(err))
1364
      if self.op.candidate_pool_size < 1:
1365
        raise errors.OpPrereqError("At least one master candidate needed")
1366

    
1367
  def ExpandNames(self):
1368
    # FIXME: in the future maybe other cluster params won't require checking on
1369
    # all nodes to be modified.
1370
    self.needed_locks = {
1371
      locking.LEVEL_NODE: locking.ALL_SET,
1372
    }
1373
    self.share_locks[locking.LEVEL_NODE] = 1
1374

    
1375
  def BuildHooksEnv(self):
1376
    """Build hooks env.
1377

1378
    """
1379
    env = {
1380
      "OP_TARGET": self.cfg.GetClusterName(),
1381
      "NEW_VG_NAME": self.op.vg_name,
1382
      }
1383
    mn = self.cfg.GetMasterNode()
1384
    return env, [mn], [mn]
1385

    
1386
  def CheckPrereq(self):
1387
    """Check prerequisites.
1388

1389
    This checks whether the given params don't conflict and
1390
    if the given volume group is valid.
1391

1392
    """
1393
    # FIXME: This only works because there is only one parameter that can be
1394
    # changed or removed.
1395
    if self.op.vg_name is not None and not self.op.vg_name:
1396
      instances = self.cfg.GetAllInstancesInfo().values()
1397
      for inst in instances:
1398
        for disk in inst.disks:
1399
          if _RecursiveCheckIfLVMBased(disk):
1400
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1401
                                       " lvm-based instances exist")
1402

    
1403
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1404

    
1405
    # if vg_name not None, checks given volume group on all nodes
1406
    if self.op.vg_name:
1407
      vglist = self.rpc.call_vg_list(node_list)
1408
      for node in node_list:
1409
        if vglist[node].failed:
1410
          # ignoring down node
1411
          self.LogWarning("Node %s unreachable/error, ignoring" % node)
1412
          continue
1413
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
1414
                                              self.op.vg_name,
1415
                                              constants.MIN_VG_SIZE)
1416
        if vgstatus:
1417
          raise errors.OpPrereqError("Error on node '%s': %s" %
1418
                                     (node, vgstatus))
1419

    
1420
    self.cluster = cluster = self.cfg.GetClusterInfo()
1421
    # validate beparams changes
1422
    if self.op.beparams:
1423
      utils.CheckBEParams(self.op.beparams)
1424
      self.new_beparams = cluster.FillDict(
1425
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)
1426

    
1427
    # hypervisor list/parameters
1428
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      if self.op.vg_name != self.cfg.GetVGName():
        self.cfg.SetVGName(self.op.vg_name)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size

    self.cfg.Update(self.cluster)

    # we want to update nodes after the cluster so that if any errors
    # happen, we have recorded and saved the cluster info
    if self.op.candidate_pool_size is not None:
      _AdjustCandidatePool(self)


class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo())


def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if rstats.failed or not rstats.data:
      lu.LogWarning("Can't get any data from node %s", node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.data
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                      node, instance.disks[i].iv_name)
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

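    # poll again after at most one minute; max_time holds the latest
    # sync time estimate, so we wake up earlier if it is shorter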
    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)
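  # choose which field of the status tuple returned by blockdev_find
  # to test: index 5 is the overall is_degraded flag, index 6 the
  # local-storage (ldisk) one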
  if ldisk:
    idx = 6
  else:
    idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    if rstats.failed or not rstats.data:
      logging.warning("Node %s: disk degraded, not found or node down", node)
      result = False
    else:
      result = result and (not rstats.data[idx])
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @returns: a dictionary with osnames as keys and as value another map, with
        nodes as keys and list of OS objects as values, eg::

          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}
          }

    """
    all_os = {}
    for node_name, nr in rlist.iteritems():
      if nr.failed or not nr.data:
        continue
      for os_obj in nr.data:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in node_list:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()
                   if node in node_list]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(valid_nodes, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output


class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)


class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal",
    )

  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    "offline",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_locking:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.failed and nodeinfo.data:
          nodeinfo = nodeinfo.data
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field == "master_candidate":
          val = node.master_candidate
        elif field == "master":
          val = node.name == master_node
        elif field == "offline":
          val = node.offline
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output


class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      if node not in volumes or volumes[node].failed or not volumes[node].data:
        continue

      node_vols = volumes[node].data[:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output


class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # check reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

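    # the new node becomes a master candidate only if the candidate
    # pool is not yet at its configured size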
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
    mc_now, _ = self.cfg.GetMasterCandidateStats()
    master_candidate = mc_now < cp_size

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip,
                                 master_candidate=master_candidate,
                                 offline=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise()
    if result.data:
      if constants.PROTOCOL_VERSION == result.data:
        logging.info("Communication to node %s fine, sw version %s match",
                     node, result.data)
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result.data))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logging.info("Copy ssh key to node %s", node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    if result.failed or not result.data:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      if result.failed or not result.data:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if result[verifier].failed or not result[verifier].data:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier].data['nodelist']:
        for failed in result[verifier].data['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier].data['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logging.debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed", fname, to_node)

    to_copy = []
    enabled_hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    if constants.HTS_USE_VNC.intersection(enabled_hypervisors):
      to_copy.append(constants.VNC_PASSWORD_FILE)

    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      if result[node].failed or not result[node].data:
        logging.error("Could not copy file %s to node %s", fname, node)

    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)


class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    if self.op.master_candidate is None and self.op.offline is None:
      raise errors.OpPrereqError("Please pass at least one modification")
    if self.op.offline == True and self.op.master_candidate == True:
      raise errors.OpPrereqError("Can't set the node into offline and"
                                 " master_candidate at the same time")

  def ExpandNames(self):
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the requested changes against the current node state
    and the cluster's candidate pool size.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if ((self.op.master_candidate == False or self.op.offline == True)
        and node.master_candidate):
      # we will demote the node from master_candidate
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master node has to be a"
                                   " master candidate and online")
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
      if num_candidates <= cp_size:
        msg = ("Not enough master candidates (desired"
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
        if self.op.force:
          self.LogWarning(msg)
        else:
          raise errors.OpPrereqError(msg)

    if (self.op.master_candidate == True and node.offline and
        not self.op.offline == False):
      raise errors.OpPrereqError("Can't set an offline node to"
                                 " master_candidate")

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node

    result = []

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True and node.master_candidate:
        node.master_candidate = False
        result.append(("master_candidate", "auto-demotion due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
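        # ask the node to demote itself; the reply is expected to be
        # a (success, message) pair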
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        if (rrc.failed or not isinstance(rrc.data, (tuple, list))
            or len(rrc.data) != 2):
          self.LogWarning("Node rpc error: %s" % rrc.error)
        elif not rrc.data[0]:
          self.LogWarning("Node failed to demote itself: %s" % rrc.data[1])

    # this will trigger configuration file update, if needed
    self.cfg.Update(node)
    # this will trigger job queue propagation or cleanup
    if self.op.node_name != self.cfg.GetMasterNode():
      self.context.ReaddNode(node)

    return result


class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.default_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": cluster.hvparams,
      "beparams": cluster.beparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      }

    return result


class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
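        # the job queue is drained iff the drain marker file exists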
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values


class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      if result.failed or not result:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1)",
                           inst_disk.iv_name, node)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      if result.failed or not result:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2)",
                           inst_disk.iv_name, node)
        disks_ok = False
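    # result.data is the node-visible device path as returned by the
    # assemble call on the primary node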
    device_info.append((instance.primary_node, inst_disk.iv_name, result.data))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info


def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
                                           ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
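    # force=None means the caller (e.g. reinstall or rename) offers no
    # --force option, so the retry hint below would be misleading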
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    """
    instance = self.instance
    _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
                                    [instance.hypervisor])
  ins_l = ins_l[instance.primary_node]
  if ins_l.failed or not isinstance(ins_l.data, list):
    raise errors.OpExecError("Can't contact node '%s'" %
                             instance.primary_node)

  if instance.name in ins_l.data:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)


def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are
  ignored.

  """
  # track the overall status separately, so the per-call RPC result
  # below can't clobber the return value
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      if result.failed or not result.data:
        logging.error("Could not shutdown block device %s on node %s",
                      disk.iv_name, node)
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result


def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
  nodeinfo[node].Raise()
  free_mem = nodeinfo[node].data.get('memory_free')
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem))


class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

    _CheckNodeFreeMemory(self, instance.primary_node,
                         "starting instance %s" % instance.name,
                         bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")

    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    result = self.rpc.call_instance_start(node_current, instance, extra_args)
    msg = result.RemoteFailMsg()
    if msg:
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance: %s" % msg)


class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type, extra_args)
      if result.failed or not result.data:
        raise errors.OpExecError("Could not reboot instance")
    else:
      result = self.rpc.call_instance_shutdown(node_current, instance)
      if result.failed or not result.data:
        raise errors.OpExecError("Could not shutdown instance for"
                                 " full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, extra_args)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)


class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    self.cfg.MarkInstanceDown(instance.name)
    result = self.rpc.call_instance_shutdown(node_current, instance)
    if result.failed or not result.data:
      self.proc.LogWarning("Could not shutdown instance")

    _ShutdownInstanceDisks(self, instance)


class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info.failed or remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   self.op.pnode)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise()
      if not isinstance(result.data, objects.OS):
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      result = self.rpc.call_instance_os_add(inst.primary_node, inst)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s: %s" %
                                 (inst.name, inst.primary_node, msg))
    finally:
      _ShutdownInstanceDisks(self, inst)




class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result.data[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.RemoteFailMsg()
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node only.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
    if result.failed or not result.data:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance")
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))

    logging.info("Removing block devices for instance %s", instance.name)

    if not _RemoveDisks(self, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", instance.name)

    self.cfg.RemoveInstance(instance.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name


class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state", "admin_ram",
                                    "disk_template", "ip", "mac", "bridge",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    "(disk).(size)/([0-9]+)",
                                    "(disk).(sizes)",
                                    "(nic).(mac|ip|bridge)/([0-9]+)",
                                    "(nic).(macs|ips|bridges)",
                                    "(disk|nic).(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
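
  # The parenthesised field names above are regular expressions, not plain
  # strings: for example, a query for "disk.size/0" matches
  # "(disk).(size)/([0-9]+)" and yields the size of the instance's first
  # disk, while "nic.macs" matches "(nic).(macs|ips|bridges)" and yields
  # the list of all NIC MAC addresses. The matched groups are interpreted
  # in Exec() below.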

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of instances and their attributes.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # caller didn't specify instance names, so ordering is not important
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # caller did specify names, so we must keep the ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    off_nodes = []
    if self.do_locking:
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.failed:
          bad_nodes.append(name)
        else:
          if result.data:
            live_data.update(result.data)
          # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "bridges":
              val = [nic.bridge for nic in instance.nics]
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "bridge":
                  val = instance.nics[nic_idx].bridge
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, "Unhandled variable parameter"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output


class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, bep[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One or more target bridges %s do not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    if result.failed or not result.data:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down", instance.name, source_node,
                             source_node)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUMigrateInstance(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "live", "cleanup"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "drbd8 disk template")

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    if result.failed or not result.data:
      raise errors.OpPrereqError("One or more target bridges %s do not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    if not self.op.cleanup:
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
                                   msg)

    self.instance = instance

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        msg = nres.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
                                   (node, msg))
        node_done, node_percent = nres.data[1]
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
                               " error %s" % (node, msg))

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot disconnect disks on node %s,"
                                 " error %s" % (node, msg))

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot change disks config on node %s,"
                                 " error: %s" % (node, msg))
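
  # The four helpers above implement the DRBD transitions that migration
  # and cleanup are built from: demote one node's disks to secondary
  # (_EnsureSecondary), disconnect the disks from the network on both
  # nodes (_GoStandalone), reconnect them in single- or dual-master mode
  # (_GoReconnect), and poll until the mirrors are in sync
  # (_WaitUntilSync).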

  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise()
      if not isinstance(result.data, list):
        raise errors.OpExecError("Can't contact node '%s'" % node)

    runningon_source = instance.name in ins_l[source_node].data
    runningon_target = instance.name in ins_l[target_node].data

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore errors here, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.LogWarning("Migration failed and I can't reconnect the"
                      " drives: error '%s'\n"
                      "Please look and recover the instance status" %
                      str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.RemoteFailMsg()
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s" %
                    (target_node, abort_msg))
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.data[1]

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.op.live)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s" % msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def Exec(self, feedback_fn):
    """Perform the migration.

    """
    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node
    self.target_node = self.instance.secondary_nodes[0]
    self.all_nodes = [self.source_node, self.target_node]
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }
    if self.op.cleanup:
      return self._ExecCleanup()
    else:
      return self._ExecMigration()
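

# The helpers below handle instance disks: _GenerateDiskTemplate (together
# with _GenerateUniqueNames and _GenerateDRBD8Branch) builds the
# objects.Disk definitions for a given disk template, while
# _CreateBlockDev and _CreateSingleBlockDev instantiate such definitions
# as real block devices on the nodes.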
def _CreateBlockDev(lu, node, instance, device, force_create,
                    info, force_open):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device which has
      CreateOnSecondary() attribute
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.CreateBlockDevice} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  if device.CreateOnSecondary():
    force_create = True

  if device.children:
    for child in device.children:
      _CreateBlockDev(lu, node, instance, child, force_create,
                      info, force_open)

  if not force_create:
    return

  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
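
# Note that _CreateBlockDev recurses into the children before creating
# the device itself (via _CreateSingleBlockDev below), so e.g. a DRBD
# device's data and metadata LVs always exist before the DRBD device is
# created; force_create only propagates downwards once a device with
# CreateOnSecondary() has been seen.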


def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created in advance.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.CreateBlockDevice} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  lu.cfg.SetDiskID(device, node)
  result = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, force_open, info)
  msg = result.RemoteFailMsg()
  if msg:
    raise errors.OpExecError("Can't create block device %s on"
                             " node %s for instance %s: %s" %
                             (device, node, instance.name, msg))
  if device.physical_id is None:
    device.physical_id = result.data[1]


def _GenerateUniqueNames(lu, exts):
  """Generate suitable LV names.

  This will generate a logical volume name for each of the given
  extensions, by prefixing it with a unique ID.

  """
  results = []
  for val in exts:
    new_id = lu.cfg.GenerateUniqueID()
    results.append("%s%s" % (new_id, val))
  return results
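
# A sketch of the expected result, assuming cfg.GenerateUniqueID() returns
# a UUID-like string:
#   _GenerateUniqueNames(lu, [".disk0", ".disk1"])
#   -> ["<uuid1>.disk0", "<uuid2>.disk1"]
# (a fresh unique ID is generated for every extension)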


def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  shared_secret = lu.cfg.GenerateDRBDSecret()
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                          logical_id=(primary, secondary, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name)
  return drbd_dev
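
# The device tree built above is:
#
#   LD_DRBD8 (logical_id: primary, secondary, port, minors, shared secret)
#    |- LD_LV data volume (names[0], the full disk size)
#    `- LD_LV metadata volume (names[1], fixed 128MB)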


def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % i
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vgname, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         idx)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
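
# Summary of what each template expands to, per entry in disk_info:
#   DT_DISKLESS: no disks at all
#   DT_PLAIN:    one LD_LV volume on the primary node (no secondaries)
#   DT_DRBD8:    one _GenerateDRBD8Branch tree, mirrored between the
#                primary node and exactly one secondary node
#   DT_FILE:     one LD_FILE disk under file_storage_dir (no secondaries)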


def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name
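
# For example, an instance named "inst1.example.com" (hypothetical name)
# would get "originstname+inst1.example.com" as its info text; this is
# what _CreateDisks passes as the 'info' metadata that _CreateBlockDev
# attaches to the devices (represented as an LVM tag).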


def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @rtype: boolean
  @return: the success of the creation

  """
  info = _GetInstanceInfoText(instance)
  pnode = instance.primary_node

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    if result.failed or not result.data:
      raise errors.OpExecError("Could not connect to node '%s'" % pnode)

    if not result.data[0]:
      raise errors.OpExecError("Failed to create directory '%s'" %
                               file_storage_dir)

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for device in instance.disks:
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE