#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import time
import tempfile
import re
import platform
import logging
import copy
import random

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import serializer
from ganeti import ssconf


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensure
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If there are no nodes to return, an empty list (and not None) should
    be used.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


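# The following class is an illustrative sketch only and is not referenced
# anywhere else in this module: a hypothetical, minimal LU following the rules
# documented in LogicalUnit above (ExpandNames declares the locks, CheckPrereq
# validates cluster state, Exec does the work). The class name and its purpose
# are assumptions made purely for the example; it is not wired to any opcode.
class _LUExampleListNodeNames(NoHooksLU):
  """Example-only LU returning the sorted list of node names.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # Declare node locks and share them, since this example only reads
    # the configuration
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """No prerequisites beyond the declared locks.

    """

  def Exec(self, feedback_fn):
    """Return the node names known to the configuration.

    """
    return utils.NiceSort(self.cfg.GetNodeList())

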
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpPrereqError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta))


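# Illustrative sketch (not part of the original module): how a query-style LU
# might call _CheckOutputFields from its CheckPrereq. The field names below
# are hypothetical; only fields present in either set pass the check, anything
# else raises OpPrereqError.
#
#   _CheckOutputFields(static=utils.FieldSet("name", "pinst_cnt"),
#                      dynamic=utils.FieldSet("free_memory"),
#                      selected=self.op.output_fields)

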
def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)))
  setattr(op, name, val)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node)


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, bridge, mac) representing
      the NICs the instance has
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge, mac) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env


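# Illustrative sketch only (not used by any LU in this module): what a call to
# _BuildInstanceHookEnv looks like and which keys it yields. The instance
# name, node names, OS name and NIC values below are hypothetical; the helper
# is defined here purely for documentation and is never called.
def _ExampleInstanceHookEnv():
  """Example-only helper showing the hook environment layout.

  """
  env = _BuildInstanceHookEnv("instance1.example.tld", "node1.example.tld",
                              ["node2.example.tld"], "debian-etch", True,
                              128, 1, [("198.51.100.10", "xen-br0",
                                        "aa:00:00:11:22:33")])
  # env now contains OP_TARGET, INSTANCE_NAME, INSTANCE_PRIMARY,
  # INSTANCE_SECONDARIES, INSTANCE_OS_TYPE, INSTANCE_STATUS, INSTANCE_MEMORY,
  # INSTANCE_VCPUS, INSTANCE_NIC_COUNT and one INSTANCE_NIC0_IP/_BRIDGE/
  # _HWADDR triplet for the single NIC passed in.
  return env

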
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


def _AdjustCandidatePool(lu):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool()
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               ", ".join(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _CheckInstanceBridgesExist(lu, instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
  result.Raise()
  if not result.data:
    raise errors.OpPrereqError("One or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise()
    if not result.data:
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files,
                  drbd_map):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have
    @param drbd_map: the used drbd minors for this node, in
        form of minor: (instance, must_exist) which correspond to instances
        and their running status

    """
    node = nodeinfo.name

    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = node_result.get('version', None)
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
            len(remote_version) == 2):
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version[0]:
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
                  " node %s %s" % (local_version, node, remote_version[0]))
      return True

    # node seems compatible, we can actually try to look into its results

    bad = False

    # full package version
    if constants.RELEASE_VERSION != remote_version[1]:
      feedback_fn("  - WARNING: software version mismatch: master %s,"
                  " node %s %s" %
                  (constants.RELEASE_VERSION, node, remote_version[1]))

    # checks vg existence and size > 20G

    vglist = node_result.get(constants.NV_VGLIST, None)
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum

    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      for file_name in file_list:
        node_is_mc = nodeinfo.master_candidate
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
          else:
            # not candidate and this is not a must-have file
            bad = True
            feedback_fn("  - ERROR: non master-candidate has old/wrong file"
                        " '%s'" % file_name)
        else:
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)

    # checks ssh to any

    if constants.NV_NODELIST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result[constants.NV_NODELIST]:
        bad = True
        for node in node_result[constants.NV_NODELIST]:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODELIST][node]))

    if constants.NV_NODENETTEST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result[constants.NV_NODENETTEST]:
        bad = True
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODENETTEST][node]))

    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))

    # check used drbd list
    used_minors = node_result.get(constants.NV_DRBDLIST, [])
    for minor, (iname, must_exist) in drbd_map.items():
      if minor not in used_minors and must_exist:
        feedback_fn("  - ERROR: drbd minor %d of instance %s is not active" %
                    (minor, iname))
        bad = True
    for minor in used_minors:
      if minor not in drbd_map:
        feedback_fn("  - ERROR: unallocated drbd minor %d is in use" % minor)
        bad = True

    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn, n_offline):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      if node in n_offline:
        # ignore missing volumes on offline nodes
        continue
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if instanceconfig.admin_up:
      if ((node_current not in node_instance or
          not instance in node_instance[node_current]) and
          node_current not in n_offline):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad

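  # A worked example of the N+1 check above (illustrative numbers only): if
  # node C has 'sinst-by-pnode' = {"A": [i1, i2], "B": [i3]} and the
  # auto-balanced memory sizes are i1=512, i2=1024, i3=2048, then C needs
  # mfree >= 1536 to cover a failure of A and mfree >= 2048 to cover a failure
  # of B; otherwise an N+1 error is reported for the corresponding pair.
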
  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase and their failure makes
    the output be logged in the verify output and the verification to fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
                        for iname in instancelist)
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    n_offline = [] # List of offline nodes
    n_drained = [] # List of nodes being drained
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    master_files = [constants.CLUSTER_CONF_FILE]

    file_names = ssconf.SimpleStore().GetFileList()
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.RAPI_CERT_FILE)
    file_names.extend(master_files)

    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    node_verify_param = {
      constants.NV_FILELIST: file_names,
      constants.NV_NODELIST: [node.name for node in nodeinfo
                              if not node.offline],
      constants.NV_HYPERVISOR: hypervisors,
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
                                  node.secondary_ip) for node in nodeinfo
                                 if not node.offline],
      constants.NV_LVLIST: vg_name,
      constants.NV_INSTANCELIST: hypervisors,
      constants.NV_VGLIST: None,
      constants.NV_VERSION: None,
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
      constants.NV_DRBDLIST: None,
      }
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())

    cluster = self.cfg.GetClusterInfo()
    master_node = self.cfg.GetMasterNode()
    all_drbd_map = self.cfg.ComputeDRBDMap()

    for node_i in nodeinfo:
      node = node_i.name
      nresult = all_nvinfo[node].data

      if node_i.offline:
        feedback_fn("* Skipping offline node %s" % (node,))
        n_offline.append(node)
        continue

      if node == master_node:
        ntype = "master"
      elif node_i.master_candidate:
        ntype = "master candidate"
      elif node_i.drained:
        ntype = "drained"
        n_drained.append(node)
      else:
        ntype = "regular"
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))

      if all_nvinfo[node].failed or not isinstance(nresult, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_drbd = {}
      for minor, instance in all_drbd_map[node].items():
        instance = instanceinfo[instance]
        node_drbd[minor] = (instance.name, instance.admin_up)
      result = self._VerifyNode(node_i, file_names, local_checksums,
                                nresult, feedback_fn, master_files,
                                node_drbd)
      bad = bad or result

      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
      if isinstance(lvdata, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, utils.SafeEncode(lvdata)))
        bad = True
        node_volume[node] = {}
      elif not isinstance(lvdata, dict):
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = lvdata

      # node_instance
      idata = nresult.get(constants.NV_INSTANCELIST, None)
      if not isinstance(idata, list):
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
                    (node,))
        bad = True
        continue

      node_instance[node] = idata

      # node_info
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nresult[constants.NV_VGLIST][vg_name]),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = instanceinfo[instance]
      result = self._VerifyInstance(instance, inst_config, node_volume,
                                    node_instance, feedback_fn, n_offline)
      bad = bad or result
      inst_nodes_offline = []

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      elif pnode not in n_offline:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      if pnode in n_offline:
        inst_nodes_offline.append(pnode)

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        elif snode not in n_offline:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))
          bad = True
        if snode in n_offline:
          inst_nodes_offline.append(snode)

      if inst_nodes_offline:
        # warn that the instance lives on offline nodes, and set bad=True
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
                    ", ".join(inst_nodes_offline))
        bad = True

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    if n_offline:
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))

    if n_drained:
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))

    return not bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res.failed or res.data is False or not isinstance(res.data, list):
            if res.offline:
              # no need to warn or set fail return value
              continue
            feedback_fn("    Communication failure in hooks execution")
            lu_result = 1
            continue
          for script, hkr, output in res.data:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    to_act = set()
    for node in nodes:
      # node_volume
      lvs = node_lvs[node]
      if lvs.failed:
        if not lvs.offline:
          self.LogWarning("Connection to node %s failed: %s" %
                          (node, lvs.data))
        continue
      lvs = lvs.data
      if isinstance(lvs, basestring):
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
        res_nlvm[node] = lvs
      elif not isinstance(lvs, dict):
        logging.warning("Connection to node %s failed or invalid data"
                        " returned", node)
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result


class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    if result.failed or not result.data:
      raise errors.OpExecError("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed",
                        constants.SSH_KNOWN_HOSTS_FILE, to_node)

    finally:
      result = self.rpc.call_node_start_master(master, False)
      if result.failed or not result.data:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually.")


def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether a LD_LV dev_type was found or not

  """
  if disk.children:
    for chdisk in disk.children:
      if _RecursiveCheckIfLVMBased(chdisk):
        return True
  return disk.dev_type == constants.LD_LV


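# Illustrative sketch (not part of the original module): for a hypothetical
# DRBD disk object whose children are two LD_LV disks, the recursion above
# finds an LD_LV child and returns True; for a disk with no children and a
# non-LVM dev_type it returns False. The constructor arguments shown are
# assumptions made for the example.
#
#   drbd_disk = objects.Disk(dev_type=constants.LD_DRBD8,
#                            children=[objects.Disk(dev_type=constants.LD_LV),
#                                      objects.Disk(dev_type=constants.LD_LV)])
#   _RecursiveCheckIfLVMBased(drbd_disk)   # would evaluate to True

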
class LUSetClusterParams(LogicalUnit):
1363
  """Change the parameters of the cluster.
1364

1365
  """
1366
  HPATH = "cluster-modify"
1367
  HTYPE = constants.HTYPE_CLUSTER
1368
  _OP_REQP = []
1369
  REQ_BGL = False
1370

    
1371
  def CheckParameters(self):
1372
    """Check parameters
1373

1374
    """
1375
    if not hasattr(self.op, "candidate_pool_size"):
1376
      self.op.candidate_pool_size = None
1377
    if self.op.candidate_pool_size is not None:
1378
      try:
1379
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
1380
      except ValueError, err:
1381
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
1382
                                   str(err))
1383
      if self.op.candidate_pool_size < 1:
1384
        raise errors.OpPrereqError("At least one master candidate needed")
1385

    
1386
  def ExpandNames(self):
1387
    # FIXME: in the future maybe other cluster params won't require checking on
1388
    # all nodes to be modified.
1389
    self.needed_locks = {
1390
      locking.LEVEL_NODE: locking.ALL_SET,
1391
    }
1392
    self.share_locks[locking.LEVEL_NODE] = 1
1393

    
1394
  def BuildHooksEnv(self):
1395
    """Build hooks env.
1396

1397
    """
1398
    env = {
1399
      "OP_TARGET": self.cfg.GetClusterName(),
1400
      "NEW_VG_NAME": self.op.vg_name,
1401
      }
1402
    mn = self.cfg.GetMasterNode()
1403
    return env, [mn], [mn]
1404

    
1405
  def CheckPrereq(self):
1406
    """Check prerequisites.
1407

1408
    This checks whether the given params don't conflict and
1409
    if the given volume group is valid.
1410

1411
    """
1412
    # FIXME: This only works because there is only one parameter that can be
1413
    # changed or removed.
1414
    if self.op.vg_name is not None and not self.op.vg_name:
1415
      instances = self.cfg.GetAllInstancesInfo().values()
1416
      for inst in instances:
1417
        for disk in inst.disks:
1418
          if _RecursiveCheckIfLVMBased(disk):
1419
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1420
                                       " lvm-based instances exist")
1421

    
1422
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1423

    
1424
    # if vg_name not None, checks given volume group on all nodes
1425
    if self.op.vg_name:
1426
      vglist = self.rpc.call_vg_list(node_list)
1427
      for node in node_list:
1428
        if vglist[node].failed:
1429
          # ignoring down node
1430
          self.LogWarning("Node %s unreachable/error, ignoring" % node)
1431
          continue
1432
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
1433
                                              self.op.vg_name,
1434
                                              constants.MIN_VG_SIZE)
1435
        if vgstatus:
1436
          raise errors.OpPrereqError("Error on node '%s': %s" %
1437
                                     (node, vgstatus))
1438

    
1439
    self.cluster = cluster = self.cfg.GetClusterInfo()
1440
    # validate beparams changes
1441
    if self.op.beparams:
1442
      utils.CheckBEParams(self.op.beparams)
1443
      self.new_beparams = cluster.FillDict(
1444
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)
1445

    
1446
    # hypervisor list/parameters
1447
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
1448
    if self.op.hvparams:
1449
      if not isinstance(self.op.hvparams, dict):
1450
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
1451
      for hv_name, hv_dict in self.op.hvparams.items():
1452
        if hv_name not in self.new_hvparams:
1453
          self.new_hvparams[hv_name] = hv_dict
1454
        else:
1455
          self.new_hvparams[hv_name].update(hv_dict)
1456

    
1457
    if self.op.enabled_hypervisors is not None:
1458
      self.hv_list = self.op.enabled_hypervisors
1459
    else:
1460
      self.hv_list = cluster.enabled_hypervisors
1461

    
1462
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
1463
      # either the enabled list has changed, or the parameters have, validate
1464
      for hv_name, hv_params in self.new_hvparams.items():
1465
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
1466
            (self.op.enabled_hypervisors and
1467
             hv_name in self.op.enabled_hypervisors)):
1468
          # either this is a new hypervisor, or its parameters have changed
1469
          hv_class = hypervisor.GetHypervisor(hv_name)
1470
          hv_class.CheckParameterSyntax(hv_params)
1471
          _CheckHVParams(self, node_list, hv_name, hv_params)
1472

    
1473
  def Exec(self, feedback_fn):
1474
    """Change the parameters of the cluster.
1475

1476
    """
1477
    if self.op.vg_name is not None:
1478
      if self.op.vg_name != self.cfg.GetVGName():
1479
        self.cfg.SetVGName(self.op.vg_name)
1480
      else:
1481
        feedback_fn("Cluster LVM configuration already in desired"
1482
                    " state, not changing")
1483
    if self.op.hvparams:
1484
      self.cluster.hvparams = self.new_hvparams
1485
    if self.op.enabled_hypervisors is not None:
1486
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1487
    if self.op.beparams:
1488
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
1489
    if self.op.candidate_pool_size is not None:
1490
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
1491

    
1492
    self.cfg.Update(self.cluster)
1493

    
1494
    # we want to update nodes after the cluster so that if any errors
1495
    # happen, we have recorded and saved the cluster info
1496
    if self.op.candidate_pool_size is not None:
1497
      _AdjustCandidatePool(self)
1498

    
1499

    
1500
class LURedistributeConfig(NoHooksLU):
1501
  """Force the redistribution of cluster configuration.
1502

1503
  This is a very simple LU.
1504

1505
  """
1506
  _OP_REQP = []
1507
  REQ_BGL = False
1508

    
1509
  def ExpandNames(self):
1510
    self.needed_locks = {
1511
      locking.LEVEL_NODE: locking.ALL_SET,
1512
    }
1513
    self.share_locks[locking.LEVEL_NODE] = 1
1514

    
1515
  def CheckPrereq(self):
1516
    """Check prerequisites.
1517

1518
    """
1519

    
1520
  def Exec(self, feedback_fn):
1521
    """Redistribute the configuration.
1522

1523
    """
1524
    self.cfg.Update(self.cfg.GetClusterInfo())
1525

    
1526

    
1527
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
1528
  """Sleep and poll for an instance's disk to sync.
1529

1530
  """
1531
  if not instance.disks:
1532
    return True
1533

    
1534
  if not oneshot:
1535
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1536

    
1537
  node = instance.primary_node
1538

    
1539
  for dev in instance.disks:
1540
    lu.cfg.SetDiskID(dev, node)
1541

    
1542
  retries = 0
1543
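  # poll the mirror status on the primary node; tolerate up to 10
  # consecutive failed RPC calls (retrying every 6 seconds) before giving up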
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if rstats.failed or not rstats.data:
      lu.LogWarning("Can't get any data from node %s", node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.data
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                      node, instance.disks[i].iv_name)
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)
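  # pick which field of the blockdev_find status tuple to test:
  # index 6 is the ldisk (local storage) status, index 5 is is_degraded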
  if ldisk:
    idx = 6
  else:
    idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.RemoteFailMsg()
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      result = result and (not rstats.payload[idx])
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @returns: a dictionary with osnames as keys and as value another map, with
        nodes as keys and list of OS objects as values, eg::

          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}
          }

    """
    all_os = {}
    for node_name, nr in rlist.iteritems():
      if nr.failed or not nr.data:
        continue
      for os_obj in nr.data:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in node_list:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()
                   if node in node_list]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(valid_nodes, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output


class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)


class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    "offline",
    "drained",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted


  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.failed and nodeinfo.data:
          nodeinfo = nodeinfo.data
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
            }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field == "master_candidate":
          val = node.master_candidate
        elif field == "master":
          val = node.name == master_node
        elif field == "offline":
          val = node.offline
        elif field == "drained":
          val = node.drained
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output


class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      if node not in volumes or volumes[node].failed or not volumes[node].data:
        continue

      node_vols = volumes[node].data[:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output


class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

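    # the new node becomes a master candidate only if the candidate
    # pool is not yet full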
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
    mc_now, _ = self.cfg.GetMasterCandidateStats()
    master_candidate = mc_now < cp_size

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip,
                                 master_candidate=master_candidate,
                                 offline=False, drained=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise()
    if result.data:
      if constants.PROTOCOL_VERSION == result.data:
        logging.info("Communication to node %s fine, sw version %s match",
                     node, result.data)
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result.data))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logging.info("Copy ssh key to node %s", node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot transfer ssh keys to the"
                               " new node: %s" % msg)

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      if result.failed or not result.data:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if result[verifier].failed or not result[verifier].data:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier].data['nodelist']:
        for failed in result[verifier].data['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier].data['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logging.debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed", fname, to_node)

    to_copy = []
    enabled_hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    if constants.HTS_USE_VNC.intersection(enabled_hypervisors):
      to_copy.append(constants.VNC_PASSWORD_FILE)

    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      if result[node].failed or not result[node]:
        logging.error("Could not copy file %s to node %s", fname, node)

    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)


class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    if self.op.master_candidate is None and self.op.offline is None:
      raise errors.OpPrereqError("Please pass at least one modification")
    if self.op.offline == True and self.op.master_candidate == True:
      raise errors.OpPrereqError("Can't set the node into offline and"
                                 " master_candidate at the same time")

  def ExpandNames(self):
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if ((self.op.master_candidate == False or self.op.offline == True)
        and node.master_candidate):
      # we will demote the node from master_candidate
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master node has to be a"
                                   " master candidate and online")
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
      if num_candidates <= cp_size:
        msg = ("Not enough master candidates (desired"
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
        if self.op.force:
          self.LogWarning(msg)
        else:
          raise errors.OpPrereqError(msg)

    if (self.op.master_candidate == True and node.offline and
        not self.op.offline == False):
      raise errors.OpPrereqError("Can't set an offline node to"
                                 " master_candidate")

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node

    result = []

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True and node.master_candidate:
        node.master_candidate = False
        result.append(("master_candidate", "auto-demotion due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        msg = rrc.RemoteFailMsg()
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    # this will trigger configuration file update, if needed
    self.cfg.Update(node)
    # this will trigger job queue propagation or cleanup
    if self.op.node_name != self.cfg.GetMasterNode():
      self.context.ReaddNode(node)

    return result


class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.default_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": dict([(hypervisor, cluster.hvparams[hypervisor])
                        for hypervisor in cluster.enabled_hypervisors]),
      "beparams": cluster.beparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      }

    return result


class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values


class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two-pass mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      msg = result.RemoteFailMsg()
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      msg = result.RemoteFailMsg()
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
    device_info.append((instance.primary_node, inst_disk.iv_name, result.data))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info


def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
                                           ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    instance = self.instance
    _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
                                      [instance.hypervisor])
  ins_l = ins_l[instance.primary_node]
  if ins_l.failed or not isinstance(ins_l.data, list):
    raise errors.OpExecError("Can't contact node '%s'" %
                             instance.primary_node)

  if instance.name in ins_l.data:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)


def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are
  ignored.

  """
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.RemoteFailMsg()
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result


def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
  nodeinfo[node].Raise()
  free_mem = nodeinfo[node].data.get('memory_free')
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                             " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                             " needed %s MiB, available %s MiB" %
                             (node, reason, requested, free_mem))


class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

    _CheckNodeFreeMemory(self, instance.primary_node,
                         "starting instance %s" % instance.name,
                         bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")

    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    result = self.rpc.call_instance_start(node_current, instance, extra_args)
    msg = result.RemoteFailMsg()
    if msg:
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance: %s" % msg)


class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type, extra_args)
      if result.failed or not result.data:
        raise errors.OpExecError("Could not reboot instance")
    else:
      if not self.rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, extra_args)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)


class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    self.cfg.MarkInstanceDown(instance.name)
    result = self.rpc.call_instance_shutdown(node_current, instance)
    if result.failed or not result.data:
      self.proc.LogWarning("Could not shutdown instance")

    _ShutdownInstanceDisks(self, instance)


class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info.failed or remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   self.op.pnode)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise()
      if not isinstance(result.data, objects.OS):
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      result = self.rpc.call_instance_os_add(inst.primary_node, inst)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s: %s" %
                                 (inst.name, inst.primary_node, msg))
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result.data[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.RemoteFailMsg()
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
    if result.failed or not result.data:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance")
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))

    logging.info("Removing block devices for instance %s", instance.name)

    if not _RemoveDisks(self, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", instance.name)

    self.cfg.RemoveInstance(instance.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name


class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state", "admin_ram",
                                    "disk_template", "ip", "mac", "bridge",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    "(disk).(size)/([0-9]+)",
                                    "(disk).(sizes)", "disk_usage",
                                    "(nic).(mac|ip|bridge)/([0-9]+)",
                                    "(nic).(macs|ips|bridges)",
                                    "(disk|nic).(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")


  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of instances and their attributes.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # caller didn't specify instance names, so ordering is not important
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # caller did specify names, so we must keep the ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    off_nodes = []
    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.failed:
          bad_nodes.append(name)
        else:
          if result.data:
            live_data.update(result.data)
            # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "disk_usage": # total disk usage per node
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "bridges":
              val = [nic.bridge for nic in instance.nics]
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "bridge":
                  val = instance.nics[nic_idx].bridge
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, "Unhandled variable parameter"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output


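# Illustrative note on LUQueryInstances above (not part of the original
# module): parameterized fields such as "disk.size/0" or "nic.mac/1" are
# declared as regex-like patterns ("(disk).(size)/([0-9]+)") in
# _FIELDS_STATIC, and Exec() dispatches on the groups returned by
# _FIELDS_STATIC.Matches().  A rough sketch of that matching, using plain
# re as a stand-in for utils.FieldSet (an assumption for illustration only):
#
#   import re
#   pattern = re.compile(r"^(disk|nic)\.(size|mac|ip|bridge)/([0-9]+)$")
#   m = pattern.match("disk.size/0")
#   if m:
#     kind, attr, idx = m.group(1), m.group(2), int(m.group(3))
#     # kind == "disk", attr == "size", idx == 0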
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, bep[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    if result.failed or not result.data:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding"
                             " anyway. Please make sure node %s is down",
                             instance.name, source_node, source_node)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUMigrateInstance(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "live", "cleanup"]

  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ConfigurationError("No secondary node but using"
                                      " drbd8 disk template")

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    if result.failed or not result.data:
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    if not self.op.cleanup:
      _CheckNodeNotDrained(self, target_node)
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
                                   msg)

    self.instance = instance

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        msg = nres.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
                                   (node, msg))
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
                               " error %s" % (node, msg))

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot disconnect disks node %s,"
                                 " error %s" % (node, msg))

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot change disks config on node %s,"
                                 " error: %s" % (node, msg))

  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise()
      if not isinstance(result.data, list):
        raise errors.OpExecError("Can't contact node '%s'" % node)

    runningon_source = instance.name in ins_l[source_node].data
    runningon_target = instance.name in ins_l[target_node].data

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore errors here, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.LogWarning("Migration failed and I can't reconnect the"
                      " drives: error '%s'\n"
                      "Please look and recover the instance status" %
                      str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.RemoteFailMsg()
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s" %
                    (target_node, abort_msg))
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.

    The migration is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.op.live)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s" % msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def Exec(self, feedback_fn):
    """Perform the migration.

    """
    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node
    self.target_node = self.instance.secondary_nodes[0]
    self.all_nodes = [self.source_node, self.target_node]
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }
    if self.op.cleanup:
      return self._ExecCleanup()
    else:
      return self._ExecMigration()


def _CreateBlockDev(lu, node, instance, device, force_create,
                    info, force_open):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device which has
      CreateOnSecondary() attribute
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device own Open() execution

  """
  if device.CreateOnSecondary():
    force_create = True

  if device.children:
    for child in device.children:
      _CreateBlockDev(lu, node, instance, child, force_create,
                      info, force_open)

  if not force_create:
    return

  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)


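# Illustrative note on _CreateBlockDev above (not part of the original
# module): the recursion visits children before the device itself, and
# force_create is latched to True as soon as a device reporting
# CreateOnSecondary() is encountered, so that device and everything below
# it get created on the secondary node as well.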
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created in advance.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device own Open() execution

  """
  lu.cfg.SetDiskID(device, node)
  result = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, force_open, info)
  msg = result.RemoteFailMsg()
  if msg:
    raise errors.OpExecError("Can't create block device %s on"
                             " node %s for instance %s: %s" %
                             (device, node, instance.name, msg))
  if device.physical_id is None:
    device.physical_id = result.payload


def _GenerateUniqueNames(lu, exts):
  """Generate a suitable LV name.

  This will generate a logical volume name for the given instance.

  """
  results = []
  for val in exts:
    new_id = lu.cfg.GenerateUniqueID()
    results.append("%s%s" % (new_id, val))
  return results


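# Illustrative note on _GenerateUniqueNames above (not part of the original
# module): each extension is appended to a freshly generated unique ID, so
# for exts == [".disk0", ".disk1"] the result looks roughly like
#   ["<uuid-1>.disk0", "<uuid-2>.disk1"]
# (placeholder UUIDs; the real values come from cfg.GenerateUniqueID()).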
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  shared_secret = lu.cfg.GenerateDRBDSecret()
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                          logical_id=(primary, secondary, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name)
  return drbd_dev


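# Illustrative note on _GenerateDRBD8Branch above (not part of the original
# module): the returned object is a small disk tree, roughly
#
#   LD_DRBD8 (size=size, logical_id=(primary, secondary, port,
#                                    p_minor, s_minor, shared_secret))
#    +- LD_LV data volume (size=size, logical_id=(vgname, names[0]))
#    +- LD_LV meta volume (size=128,  logical_id=(vgname, names[1]))
#
# i.e. a DRBD8 device backed by a data LV and a fixed 128 MB metadata LV.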
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % i
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vgname, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         idx)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks


def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name


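# Illustrative note on _GetInstanceInfoText above (not part of the original
# module): for an instance named "inst1.example.com" (a hypothetical name)
# the returned metadata text is "originstname+inst1.example.com".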
def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @rtype: boolean
  @return: the success of the creation

  """
  info = _GetInstanceInfoText(instance)
  pnode = instance.primary_node

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    if result.failed or not result.data:
      raise errors.OpExecError("Could not connect to node '%s'" % pnode)

    if not result.data[0]:
      raise errors.OpExecError("Failed to create directory '%s'" %
                               file_storage_dir)

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for device in instance.disks:
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    for node in instance.all_nodes:
      f_create = node == pnode
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)


def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(disk, node)
      msg = lu.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
      if msg:
        lu.LogWarning("Could not remove block device %s on node %s,"
                      " continuing anyway: %s", device.iv_name, node, msg)
        all_result = False

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                                 file_storage_dir)
    if result.failed or not result.data:
      logging.error("Could not remove directory '%s'", file_storage_dir)
      all_result = False

  return all_result


def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group

  """
  # Required free disk space as a function of disk and swap space
  req_size_dict = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: sum(d["size"] for d in disks),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
    constants.DT_FILE: None,
  }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return req_size_dict[disk_template]


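# Illustrative note on _ComputeDiskSize above (not part of the original
# module): a quick worked example, assuming two disks of 1024 and 2048 MB:
#   DT_PLAIN:  1024 + 2048                 = 3072
#   DT_DRBD8:  (1024 + 128) + (2048 + 128) = 3328  (128 MB of DRBD metadata
#                                                   per disk)
#   DT_DISKLESS / DT_FILE: None (no volume group space is consumed)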
def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstracts the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
                                                  hvname,
                                                  hvparams)
  for node in nodenames:
    info = hvinfo[node]
    if info.offline:
      continue
    msg = info.RemoteFailMsg()
    if msg:
      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
                                 " %s" % msg)


class LUCreateInstance(LogicalUnit):
4152
  """Create an instance.
4153

4154
  """
4155
  HPATH = "instance-add"
4156
  HTYPE = constants.HTYPE_INSTANCE
4157
  _OP_REQP = ["instance_name", "disks", "disk_template",
4158
              "mode", "start",
4159
              "wait_for_sync", "ip_check", "nics",
4160
              "hvparams", "beparams"]
4161
  REQ_BGL = False
4162

    
4163
  def _ExpandNode(self, node):
4164
    """Expands and checks one node name.
4165

4166
    """
4167
    node_full = self.cfg.ExpandNodeName(node)
4168
    if node_full is None:
4169
      raise errors.OpPrereqError("Unknown node %s" % node)
4170
    return node_full
4171

    
4172
  def ExpandNames(self):
4173
    """ExpandNames for CreateInstance.
4174

4175
    Figure out the right locks for instance creation.
4176

4177
    """
4178
    self.needed_locks = {}
4179

    
4180
    # set optional parameters to none if they don't exist
4181
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
4182
      if not hasattr(self.op, attr):
4183
        setattr(self.op, attr, None)
4184

    
4185
    # cheap checks, mostly valid constants given
4186

    
4187
    # verify creation mode
4188
    if self.op.mode not in (constants.INSTANCE_CREATE,
4189
                            constants.INSTANCE_IMPORT):
4190
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
4191
                                 self.op.mode)
4192

    
4193
    # disk template and mirror node verification
4194
    if self.op.disk_template not in constants.DISK_TEMPLATES:
4195
      raise errors.OpPrereqError("Invalid disk template name")
4196

    
4197
    if self.op.hypervisor is None:
4198
      self.op.hypervisor = self.cfg.GetHypervisorType()
4199

    
4200
    cluster = self.cfg.GetClusterInfo()
4201
    enabled_hvs = cluster.enabled_hypervisors
4202
    if self.op.hypervisor not in enabled_hvs:
4203
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
4204
                                 " cluster (%s)" % (self.op.hypervisor,
4205
                                  ",".join(enabled_hvs)))
4206

    
4207
    # check hypervisor parameter syntax (locally)
4208

    
4209
    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
4210
                                  self.op.hvparams)
4211
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
4212
    hv_type.CheckParameterSyntax(filled_hvp)
4213

    
4214
    # fill and remember the beparams dict
4215
    utils.CheckBEParams(self.op.beparams)
4216
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
4217
                                    self.op.beparams)
4218

    
4219
    #### instance parameters check
4220

    
4221
    # instance name verification
4222
    hostname1 = utils.HostInfo(self.op.instance_name)
4223
    self.op.instance_name = instance_name = hostname1.name
4224

    
4225
    # this is just a preventive check, but someone might still add this
4226
    # instance in the meantime, and creation will fail at lock-add time
4227
    if instance_name in self.cfg.GetInstanceList():
4228
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
4229
                                 instance_name)
4230

    
4231
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
4232

    
4233
    # NIC buildup
4234
    self.nics = []
4235
    for nic in self.op.nics:
4236
      # ip validity checks
4237
      ip = nic.get("ip", None)
4238
      if ip is None or ip.lower() == "none":
4239
        nic_ip = None
4240
      elif ip.lower() == constants.VALUE_AUTO:
4241
        nic_ip = hostname1.ip
4242
      else:
4243
        if not utils.IsValidIP(ip):
4244
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
4245
                                     " like a valid IP" % ip)
4246
        nic_ip = ip
4247

    
4248
      # MAC address verification
4249
      mac = nic.get("mac", constants.VALUE_AUTO)
4250
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4251
        if not utils.IsValidMac(mac.lower()):
4252
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
4253
                                     mac)
4254
      # bridge verification
4255
      bridge = nic.get("bridge", None)
4256
      if bridge is None:
4257
        bridge = self.cfg.GetDefBridge()
4258
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))
4259

    
4260
    # disk checks/pre-build
4261
    self.disks = []
4262
    for disk in self.op.disks:
4263
      mode = disk.get("mode", constants.DISK_RDWR)
4264
      if mode not in constants.DISK_ACCESS_SET:
4265
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
4266
                                   mode)
4267
      size = disk.get("size", None)
4268
      if size is None:
4269
        raise errors.OpPrereqError("Missing disk size")
4270
      try:
4271
        size = int(size)
4272
      except ValueError:
4273
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
4274
      self.disks.append({"size": size, "mode": mode})

    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from an absolute"
                                     " path requires a source node option.")
      else:
        self.op.src_node = src_node = self._ExpandNode(src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            os.path.join(constants.EXPORT_DIR, src_path)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    nics = [n.ToDict() for n in self.nics]
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_ALLOC,
                     name=self.op.instance_name,
                     disk_template=self.op.disk_template,
                     tags=[],
                     os=self.op.os_type,
                     vcpus=self.be_full[constants.BE_VCPUS],
                     mem_size=self.be_full[constants.BE_MEMORY],
                     disks=self.disks,
                     nics=nics,
                     hypervisor=self.op.hypervisor,
                     )

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.pnode = ial.nodes[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 ", ".join(ial.nodes))
    if ial.required_nodes == 2:
      self.op.snode = ial.nodes[1]
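    # Note: for this allocate request, required_nodes is expected to be 2
    # (primary, then secondary) for mirrored disk templates such as drbd and
    # 1 otherwise, which is why snode is only filled in the two-node case.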

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": ",".join(str(d["size"]) for d in self.disks),
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGES"] = self.src_images

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=[(n.ip, n.bridge, n.mac) for n in self.nics],
    ))

    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl


  def CheckPrereq(self):
    """Check prerequisites.

    """
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")


    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_node is None:
        exp_list = self.rpc.call_export_list(
          self.acquired_locks[locking.LEVEL_NODE])
        found = False
        for node in exp_list:
          if not exp_list[node].failed and src_path in exp_list[node].data:
            found = True
            self.op.src_node = src_node = node
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
                                                       src_path)
            break
        if not found:
          raise errors.OpPrereqError("No export found for relative path %s" %
                                      src_path)

      _CheckNodeOnline(self, src_node)
      result = self.rpc.call_export_info(src_node, src_path)
      result.Raise()
      if not result.data:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      export_info = result.data
      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # Check that the new instance doesn't have less disks than the export
      instance_disks = len(self.disks)
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (instance_disks, export_disks))

      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = os.path.join(src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      old_name = export_info.get(constants.INISECT_INS, 'name')
      # FIXME: int() here could throw a ValueError on broken exports
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
      if self.op.instance_name == old_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO and exp_nic_count > idx:
            nic_mac_ini = 'nic%d_mac' % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))
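    # The TcpPing above simply tries to connect to the node daemon port at
    # the instance's resolved IP; a successful connection is taken to mean
    # the address is already in use.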

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name)

    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      _CheckNodeOnline(self, self.op.snode)
      _CheckNodeNotDrained(self, self.op.snode)
      self.secondaries.append(self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.disks)

    # Check lv size requirements
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo[node]
        info.Raise()
        info = info.data
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
    result.Raise()
    if not isinstance(result.data, objects.OS):
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node" % self.op.os_type)

    # bridge check on primary node
    bridges = [n.bridge for n in self.nics]
    result = self.rpc.call_bridges_exist(self.pnode.name, bridges)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One of the target bridges '%s' does not"
                                 " exist on destination node '%s'" %
                                 (",".join(bridges), pnode.name))

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC()

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    ##if self.op.vnc_bind_address is None:
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance))


    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries,
                                  self.disks,
                                  file_storage_dir,
                                  self.op.file_driver,
                                  0)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            admin_up=False,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            )

    feedback_fn("* creating instance disks...")
    try:
      _CreateDisks(self, iobj)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, iobj)
      finally:
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Unlock all the nodes
    if self.op.mode == constants.INSTANCE_IMPORT:
      nodes_keep = [self.op.src_node]
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
                       if node != self.op.src_node]
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
    else:
      self.context.glm.release(locking.LEVEL_NODE)
      del self.acquired_locks[locking.LEVEL_NODE]

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False
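    # In short: wait_for_sync blocks until the disks are fully in sync; for
    # net-mirrored templates without it we only do a one-shot "not degraded"
    # check after a short grace period; all other templates skip the check.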

    if disk_abort:
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        result = self.rpc.call_instance_os_add(pnode_name, iobj)
        msg = result.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Could not add os for instance %s"
                                   " on node %s: %s" %
                                   (instance, pnode_name, msg))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_images = self.src_images
        cluster_name = self.cfg.GetClusterName()
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
                                                         src_node, src_images,
                                                         cluster_name)
        import_result.Raise()
        for idx, result in enumerate(import_result.data):
          if not result:
            self.LogWarning("Could not import the image %s for instance"
                            " %s, disk %d, on node %s" %
                            (src_images[idx], instance, idx, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      iobj.admin_up = True
      self.cfg.Update(iobj)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, iobj, None)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not start instance: %s" % msg)


class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    instance = self.instance
    node = instance.primary_node

    node_insts = self.rpc.call_instance_list([node],
                                             [instance.hypervisor])[node]
    node_insts.Raise()

    if instance.name not in node_insts.data:
      raise errors.OpExecError("Instance %s is not running." % instance.name)

    logging.debug("Connecting to console of %s on %s", instance.name, node)

    hyper = hypervisor.GetHypervisor(instance.hypervisor)
    cluster = self.cfg.GetClusterInfo()
    # beparams and hvparams are passed separately, to avoid editing the
    # instance and then saving the defaults in the instance itself.
    hvparams = cluster.FillHV(instance)
    beparams = cluster.FillBE(instance)
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)

    # build ssh cmdline
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
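    # The value returned above is a ready-to-run ssh command line for the
    # master node, wrapping the hypervisor's console command; roughly
    # (hypothetical example): ssh -t root@node1 'xm console instance1'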


class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mode", "disks"]
  REQ_BGL = False

  def CheckArguments(self):
    if not hasattr(self.op, "remote_node"):
      self.op.remote_node = None
    if not hasattr(self.op, "iallocator"):
      self.op.iallocator = None

    # check for valid parameter combination
    cnt = [self.op.remote_node, self.op.iallocator].count(None)
    if self.op.mode == constants.REPLACE_DISK_CHG:
      if cnt == 2:
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given")
      elif cnt == 0:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
    else: # not replacing the secondary
      if cnt != 2:
        raise errors.OpPrereqError("The iallocator and new node options can"
                                   " be used only when changing the"
                                   " secondary node")

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if self.op.iallocator is not None:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    elif self.op.remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.op.remote_node = remote_node
      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # If we're not already locking all nodes in the set we have to declare the
    # instance's primary/secondary nodes.
    if (level == locking.LEVEL_NODE and
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
      self._LockInstancesNodes()

  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    """
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    self.LogInfo("Selected new secondary for the instance: %s",
                 self.op.remote_node)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    if self.op.remote_node is not None:
      nl.append(self.op.remote_node)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                 " instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    if self.op.iallocator is not None:
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance.")

    if self.op.mode == constants.REPLACE_DISK_PRI:
      n1 = self.tgt_node = instance.primary_node
      n2 = self.oth_node = self.sec_node
    elif self.op.mode == constants.REPLACE_DISK_SEC:
      n1 = self.tgt_node = self.sec_node
      n2 = self.oth_node = instance.primary_node
    elif self.op.mode == constants.REPLACE_DISK_CHG:
      n1 = self.new_node = remote_node
      n2 = self.oth_node = instance.primary_node
      self.tgt_node = self.sec_node
      _CheckNodeNotDrained(self, remote_node)
    else:
      raise errors.ProgrammerError("Unhandled disk replace mode")

    _CheckNodeOnline(self, n1)
    _CheckNodeOnline(self, n2)

    if not self.op.disks:
      self.op.disks = range(len(instance.disks))

    for disk_idx in self.op.disks:
      instance.FindDisk(disk_idx)

  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for drbd8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results[node]
      if res.failed or not res.data or my_vg not in res.data:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking disk/%d on %s" % (idx, node))
        cfg.SetDiskID(dev, node)
        result = self.rpc.call_blockdev_find(node, dev)
        msg = result.RemoteFailMsg()
        if not msg and not result.payload:
          msg = "disk not found"
        if msg:
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                   (idx, node, msg))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, oth_node))
      if not _CheckDiskConsistency(self, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".disk%d_%s" % (idx, suf)
                  for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(self, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        _CreateBlockDev(self, tgt_node, instance, new_lv, True,
                        _GetInstanceInfoText(instance), False)

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        result = self.rpc.call_blockdev_find(tgt_node, to_ren)
        if not result.RemoteFailMsg() and result.payload:
          # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
      if result.failed or not result.data:
        for new_lv in new_lvs:
          msg = self.rpc.call_blockdev_remove(tgt_node, new_lv).RemoteFailMsg()
          if msg:
            warning("Can't rollback device %s: %s", dev, msg,
                    hint="cleanup manually the unused logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      result = self.rpc.call_blockdev_find(instance.primary_node, dev)
      msg = result.RemoteFailMsg()
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
                                 (name, msg))
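      # Note: result.payload here is the status tuple from blockdev_find;
      # by the convention used in this file, index 5 is its is_degraded
      # flag (an assumption inferred from the checks below and elsewhere).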
      if result.payload[5]:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        msg = self.rpc.call_blockdev_remove(tgt_node, lv).RemoteFailMsg()
        if msg:
          warning("Can't remove old LV: %s" % msg,
                  hint="manually remove unused LVs")
          continue

  def _ExecD8Secondary(self, feedback_fn):
    """Replace the secondary node for drbd8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the correct local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    # start of work
    cfg = self.cfg
    old_node = self.tgt_node
    new_node = self.new_node
    pri_node = instance.primary_node
    nodes_ip = {
      old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
      new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
      pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
      }

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([pri_node, new_node])
    for node in pri_node, new_node:
      res = results[node]
      if res.failed or not res.data or my_vg not in res.data:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d on %s" % (idx, pri_node))
      cfg.SetDiskID(dev, pri_node)
      result = self.rpc.call_blockdev_find(pri_node, dev)
      msg = result.RemoteFailMsg()
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                 (idx, pri_node, msg))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, pri_node))
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
                                 " unsafe to replace the secondary" %
                                 pri_node)

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      info("adding new local storage on %s for disk/%d" %
           (new_node, idx))
      # we pass force_create=True to force LVM creation
      for new_lv in dev.children:
        _CreateBlockDev(self, new_node, instance, new_lv, True,
                        _GetInstanceInfoText(instance), False)

    # Step 4: drbd minors and drbd setup changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
                                   instance.name)
    logging.debug("Allocated minors %s" % (minors,))
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
      size = dev.size
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the latter activation in step 4
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if pri_node == o_node1:
        p_minor = o_minor1
      else:
        p_minor = o_minor2

      new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
      new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)

      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children)
      try:
        _CreateSingleBlockDev(self, new_node, instance, new_drbd,
                              _GetInstanceInfoText(instance), False)
      except errors.BlockDeviceError:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    for idx, dev in enumerate(instance.disks):
      # we have new devices, shutdown the drbd on the old secondary
      info("shutting down drbd for disk/%d on old node" % idx)
      cfg.SetDiskID(dev, old_node)
      msg = self.rpc.call_blockdev_shutdown(old_node, dev).RemoteFailMsg()
      if msg:
        warning("Failed to shutdown drbd for disk/%d on old node: %s" %
                (idx, msg),
                hint="Please cleanup this device manually as soon as possible")

    info("detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
                                               instance.disks)[pri_node]

    msg = result.RemoteFailMsg()
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(instance.name)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    info("updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      cfg.SetDiskID(dev, pri_node)
    cfg.Update(instance)

    # and now perform the drbd attach
    info("attaching primary drbds to new secondary (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
                                           instance.disks, instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.RemoteFailMsg()
      if msg:
        warning("can't attach drbd disks on node %s: %s", to_node, msg,
                hint="please do a gnt-instance info to see the"
                " status of disks")

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
      cfg.SetDiskID(dev, pri_node)
      result = self.rpc.call_blockdev_find(pri_node, dev)
      msg = result.RemoteFailMsg()
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
                                 (idx, msg))
      if result.payload[5]:
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)

    self.proc.LogStep(6, steps_total, "removing old storage")
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
      info("remove logical volumes for disk/%d" % idx)
      for lv in old_lvs:
        cfg.SetDiskID(lv, old_node)
        msg = self.rpc.call_blockdev_remove(old_node, lv).RemoteFailMsg()
        if msg:
          warning("Can't remove LV on old secondary: %s", msg,
                  hint="Cleanup stale volumes by hand")

  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    instance = self.instance

    # Activate the instance disks if we're replacing them on a down instance
    if not instance.admin_up:
      _StartInstanceDisks(self, instance, True)

    if self.op.mode == constants.REPLACE_DISK_CHG:
      fn = self._ExecD8Secondary
    else:
      fn = self._ExecD8DiskOnly

    ret = fn(feedback_fn)

    # Deactivate the instance disks if we're replacing them on a down instance
    if not instance.admin_up:
      _SafeShutdownInstanceDisks(self, instance)

    return ret


class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)


    self.instance = instance

    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    self.disk = instance.FindDisk(self.op.disk)

    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       instance.hypervisor)
    for node in nodenames:
      info = nodeinfo[node]
      if info.failed or not info.data:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      vg_free = info.data.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > vg_free:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, vg_free, self.op.amount))
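    # The comparison above uses the same unit for self.op.amount and the
    # node's reported vg_free (MiB, as the error message states).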

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = self.disk
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Grow request failed to node %s: %s" %
                                 (node, msg))
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance)
      if disk_abort:
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")


class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    """
    static = self.op.static
    if not static:
      self.cfg.SetDiskID(dev, instance.primary_node)
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
      msg = dev_pstatus.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Can't compute disk status for %s: %s" %
                                 (instance.name, msg))
      dev_pstatus = dev_pstatus.payload
    else:
      dev_pstatus = None

    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode and not static:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
      msg = dev_sstatus.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Can't compute disk status for %s: %s" %
                                 (instance.name, msg))
      dev_sstatus = dev_sstatus.payload
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      }
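    # pstatus/sstatus above carry the raw payload of blockdev_find on the
    # primary/secondary node, or None when static=True (no RPC is made).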

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise()
        remote_info = remote_info.data
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        }

      result[instance.name] = idict

    return result


class LUSetInstanceParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def CheckArguments(self):
    if not hasattr(self.op, 'nics'):
      self.op.nics = []
    if not hasattr(self.op, 'disks'):
      self.op.disks = []
    if not hasattr(self.op, 'beparams'):
      self.op.beparams = {}
    if not hasattr(self.op, 'hvparams'):
      self.op.hvparams = {}
    self.op.force = getattr(self.op, "force", False)
    if not (self.op.nics or self.op.disks or
            self.op.hvparams or self.op.beparams):
      raise errors.OpPrereqError("No changes submitted")

    utils.CheckBEParams(self.op.beparams)

    # Disk validation
5592
    disk_addremove = 0
5593
    for disk_op, disk_dict in self.op.disks:
5594
      if disk_op == constants.DDM_REMOVE:
5595
        disk_addremove += 1
5596
        continue
5597
      elif disk_op == constants.DDM_ADD:
5598
        disk_addremove += 1
5599
      else:
5600
        if not isinstance(disk_op, int):
5601
          raise errors.OpPrereqError("Invalid disk index")
5602
      if disk_op == constants.DDM_ADD:
5603
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
5604
        if mode not in constants.DISK_ACCESS_SET:
5605
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
5606
        size = disk_dict.get('size', None)
5607
        if size is None:
5608
          raise errors.OpPrereqError("Required disk parameter size missing")
5609
        try:
5610
          size = int(size)
5611
        except ValueError, err:
5612
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
5613
                                     str(err))
5614
        disk_dict['size'] = size
5615
      else:
5616
        # modification of disk
5617
        if 'size' in disk_dict:
5618
          raise errors.OpPrereqError("Disk size change not possible, use"
5619
                                     " grow-disk")
5620

    
5621
    if disk_addremove > 1:
5622
      raise errors.OpPrereqError("Only one disk add or remove operation"
5623
                                 " supported at a time")
5624

    
5625
    # NIC validation
5626
    nic_addremove = 0
5627
    for nic_op, nic_dict in self.op.nics:
5628
      if nic_op == constants.DDM_REMOVE:
5629
        nic_addremove += 1
5630
        continue
5631
      elif nic_op == constants.DDM_ADD:
5632
        nic_addremove += 1
5633
      else:
5634
        if not isinstance(nic_op, int):
5635
          raise errors.OpPrereqError("Invalid nic index")
5636

    
5637
      # nic_dict should be a dict
5638
      nic_ip = nic_dict.get('ip', None)
5639
      if nic_ip is not None:
5640
        if nic_ip.lower() == "none":
5641
          nic_dict['ip'] = None
5642
        else:
5643
          if not utils.IsValidIP(nic_ip):
5644
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
5645
      # we can only check None bridges and assign the default one
5646
      nic_bridge = nic_dict.get('bridge', None)
5647
      if nic_bridge is None:
5648
        nic_dict['bridge'] = self.cfg.GetDefBridge()
5649
      # but we can validate MACs
5650
      nic_mac = nic_dict.get('mac', None)
5651
      if nic_mac is not None:
5652
        if self.cfg.IsMacInUse(nic_mac):
5653
          raise errors.OpPrereqError("MAC address %s already in use"
5654
                                     " in cluster" % nic_mac)
5655
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5656
          if not utils.IsValidMac(nic_mac):
5657
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
5658
    if nic_addremove > 1:
5659
      raise errors.OpPrereqError("Only one NIC add or remove operation"
5660
                                 " supported at a time")
5661

    
5662
  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = dict()
    if constants.BE_MEMORY in self.be_new:
      args['memory'] = self.be_new[constants.BE_MEMORY]
    if constants.BE_VCPUS in self.be_new:
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
    # FIXME: readd disk/nic changes
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    force = self.force = self.op.force

    # checking the new params on the primary/secondary nodes

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    pnode = instance.primary_node
    nodelist = list(instance.all_nodes)

    # hvparams processing
    if self.op.hvparams:
      i_hvdict = copy.deepcopy(instance.hvparams)
      for key, val in self.op.hvparams.iteritems():
        if val == constants.VALUE_DEFAULT:
          try:
            del i_hvdict[key]
          except KeyError:
            pass
        elif val == constants.VALUE_NONE:
          i_hvdict[key] = None
        else:
          i_hvdict[key] = val
      cluster = self.cfg.GetClusterInfo()
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
                                i_hvdict)
      # local check
      hypervisor.GetHypervisor(
        instance.hypervisor).CheckParameterSyntax(hv_new)
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = copy.deepcopy(instance.beparams)
      for key, val in self.op.beparams.iteritems():
        if val == constants.VALUE_DEFAULT:
          try:
            del i_bedict[key]
          except KeyError:
            pass
        else:
          i_bedict[key] = val
      cluster = self.cfg.GetClusterInfo()
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                i_bedict)
      self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}

    self.warn = []

    if constants.BE_MEMORY in self.op.beparams and not self.force:
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
                                         instance.hypervisor)
      if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict):
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s" % pnode)
      else:
        if not instance_info.failed and instance_info.data:
          current_mem = instance_info.data['memory']
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
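        # the memory shortfall on the primary node: the requested memory,
        # minus what the instance currently uses, minus what the node has free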
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
                    nodeinfo[pnode].data['memory_free'])
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.iteritems():
          if node not in instance.secondary_nodes:
            continue
          if nres.failed or not isinstance(nres.data, dict):
            self.warn.append("Can't get info from secondary node %s" % node)
          elif be_new[constants.BE_MEMORY] > nres.data['memory_free']:
            self.warn.append("Not enough memory to failover instance to"
                             " secondary node %s" % node)

    # NIC processing
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        if not instance.nics:
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
        continue
      if nic_op != constants.DDM_ADD:
        # an existing nic
        if nic_op < 0 or nic_op >= len(instance.nics):
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
                                     " are 0 to %d" %
                                     (nic_op, len(instance.nics)))
      nic_bridge = nic_dict.get('bridge', None)
      if nic_bridge is not None:
        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
          msg = ("Bridge '%s' doesn't exist on one of"
                 " the instance nodes" % nic_bridge)
          if self.force:
            self.warn.append(msg)
          else:
            raise errors.OpPrereqError(msg)

    # DISK processing
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances")
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        if len(instance.disks) == 1:
          raise errors.OpPrereqError("Cannot remove the last disk of"
                                     " an instance")
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
        ins_l = ins_l[pnode]
        if ins_l.failed or not isinstance(ins_l.data, list):
          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
        if instance.name in ins_l.data:
          raise errors.OpPrereqError("Instance is running, can't remove"
                                     " disks.")

      if (disk_op == constants.DDM_ADD and
          len(instance.disks) >= constants.MAX_DISKS):
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
                                   " add more" % constants.MAX_DISKS)
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
        # an existing disk
        if disk_op < 0 or disk_op >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
                                     " are 0 to %d" %
                                     (disk_op, len(instance.disks)))

    return

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    result = []
    instance = self.instance
    # disk changes
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        # remove the last disk
        device = instance.disks.pop()
        device_idx = len(instance.disks)
        for node, disk in device.ComputeNodeTree(instance.primary_node):
          self.cfg.SetDiskID(disk, node)
          msg = self.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
          if msg:
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
                            " continuing anyway", device_idx, node, msg)
        result.append(("disk/%d" % device_idx, "remove"))
      elif disk_op == constants.DDM_ADD:
        # add a new disk
        if instance.disk_template == constants.DT_FILE:
          file_driver, file_path = instance.disks[0].logical_id
          file_path = os.path.dirname(file_path)
        else:
          file_driver = file_path = None
        disk_idx_base = len(instance.disks)
        new_disk = _GenerateDiskTemplate(self,
                                         instance.disk_template,
                                         instance.name, instance.primary_node,
                                         instance.secondary_nodes,
                                         [disk_dict],
                                         file_path,
                                         file_driver,
                                         disk_idx_base)[0]
        instance.disks.append(new_disk)
        info = _GetInstanceInfoText(instance)

        logging.info("Creating volume %s for instance %s",
                     new_disk.iv_name, instance.name)
        # Note: this needs to be kept in sync with _CreateDisks
        #HARDCODE
        for node in instance.all_nodes:
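          # the new device must exist on all of the instance's nodes;
          # f_create is True only on the primary (same logic as _CreateDisks)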
          f_create = node == instance.primary_node
          try:
            _CreateBlockDev(self, node, instance, new_disk,
                            f_create, info, f_create)
          except errors.OpExecError, err:
            self.LogWarning("Failed to create volume %s (%s) on"
                            " node %s: %s",
                            new_disk.iv_name, new_disk, node, err)
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
                       (new_disk.size, new_disk.mode)))
      else:
        # change a given disk
        instance.disks[disk_op].mode = disk_dict['mode']
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
    # NIC changes
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        # remove the last nic
        del instance.nics[-1]
        result.append(("nic.%d" % len(instance.nics), "remove"))
      elif nic_op == constants.DDM_ADD:
        # add a new nic
        if 'mac' not in nic_dict:
          mac = constants.VALUE_GENERATE
        else:
          mac = nic_dict['mac']
        if mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          mac = self.cfg.GenerateMAC()
        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
                              bridge=nic_dict.get('bridge', None))
        instance.nics.append(new_nic)
        result.append(("nic.%d" % (len(instance.nics) - 1),
                       "add:mac=%s,ip=%s,bridge=%s" %
                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
      else:
        # change a given nic
        for key in 'mac', 'ip', 'bridge':
          if key in nic_dict:
            setattr(instance.nics[nic_op], key, nic_dict[key])
            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    self.cfg.Update(instance)

    return result


class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node.

    """
    rpcresult = self.rpc.call_export_list(self.nodes)
    result = {}
    for node in rpcresult:
      if rpcresult[node].failed:
        result[node] = False
      else:
        result[node] = rpcresult[node].data

    return result


class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have to lock all nodes, as we don't know where
    # the previous export might be, and in this LU we search for it and
    # remove it from its current node. In the future we could fix this by:
    #  - making a tasklet to search (share-lock all), then create the new one,
    #    then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    if self.dst_node is None:
      # This is a wrong node name, not a non-locked node
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
    _CheckNodeOnline(self, self.dst_node.name)
    _CheckNodeNotDrained(self, self.dst_node.name)

    # instance disk type verification
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      result = self.rpc.call_instance_shutdown(src_node, instance)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, src_node))

    vgname = self.cfg.GetVGName()

    snap_disks = []

    # set the disks ID correctly since call_instance_start needs the
    # correct drbd minor to create the symlinks
    for disk in instance.disks:
      self.cfg.SetDiskID(disk, src_node)

    try:
      for disk in instance.disks:
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
        if new_dev_name.failed or not new_dev_name.data:
          self.LogWarning("Could not snapshot block device %s on node %s",
                          disk.logical_id[1], src_node)
          snap_disks.append(False)
        else:
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                 logical_id=(vgname, new_dev_name.data),
                                 physical_id=(vgname, new_dev_name.data),
                                 iv_name=disk.iv_name)
          snap_disks.append(new_dev)

    finally:
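      # restart the instance only if we shut it down above and its
      # configured state is "up"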
      if self.op.shutdown and instance.admin_up:
        result = self.rpc.call_instance_start(src_node, instance, None)
        msg = result.RemoteFailMsg()
        if msg:
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance: %s" % msg)

    # TODO: check for size

    cluster_name = self.cfg.GetClusterName()
    for idx, dev in enumerate(snap_disks):
      if dev:
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                               instance, cluster_name, idx)
        if result.failed or not result.data:
          self.LogWarning("Could not export block device %s from node %s to"
                          " node %s", dev.logical_id[1], src_node,
                          dst_node.name)
        msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
        if msg:
          self.LogWarning("Could not remove snapshot block device %s from node"
                          " %s: %s", dev.logical_id[1], src_node, msg)

    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
    if result.failed or not result.data:
      self.LogWarning("Could not finalize export for instance %s on node %s",
                      instance.name, dst_node.name)

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].failed:
          continue
        if instance.name in exportlist[node].data:
          if not self.rpc.call_export_remove(node, instance.name):
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s", instance.name, node)


class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = False
    if not instance_name:
      fqdn_warn = True
      instance_name = self.op.instance_name

    exportlist = self.rpc.call_export_list(self.acquired_locks[
      locking.LEVEL_NODE])
    found = False
    for node in exportlist:
      if exportlist[node].failed:
        self.LogWarning("Failed to query node %s, continuing" % node)
        continue
      if instance_name in exportlist[node].data:
        found = True
        result = self.rpc.call_export_remove(node, instance_name)
        if result.failed or not result.data:
          logging.error("Could not remove export for instance %s"
                        " on node %s", instance_name, node)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")


class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    self.needed_locks = {}
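    # node and instance tags lock the object they refer to; cluster tags
    # take no extra locks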
    if self.op.kind == constants.TAG_NODE:
      name = self.cfg.ExpandNodeName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.needed_locks[locking.LEVEL_NODE] = name
    elif self.op.kind == constants.TAG_INSTANCE:
      name = self.cfg.ExpandInstanceName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.needed_locks[locking.LEVEL_INSTANCE] = name

  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif self.op.kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif self.op.kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))


class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    return list(self.target.GetTags())


class LUSearchTags(NoHooksLU):
  """Searches the tags for a given pattern.

  """
  _OP_REQP = ["pattern"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the pattern passed for validity by compiling it.

    """
    try:
      self.re = re.compile(self.op.pattern)
    except re.error, err:
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
                                 (self.op.pattern, err))

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    cfg = self.cfg
    tgts = [("/cluster", cfg.GetClusterInfo())]
    ilist = cfg.GetAllInstancesInfo().values()
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
    nlist = cfg.GetAllNodesInfo().values()
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
    results = []
    for path, target in tgts:
      for tag in target.GetTags():
        if self.re.search(tag):
          results.append((path, tag))
    return results


class LUAddTags(TagsLU):
  """Sets a tag on a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the type and length of the tag name and value.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

  def Exec(self, feedback_fn):
    """Sets the tag.

    """
    try:
      for tag in self.op.tags:
        self.target.AddTag(tag)
    except errors.TagError, err:
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")


class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    if not del_tags <= cur_tags:
      diff_tags = del_tags - cur_tags
      diff_names = ["'%s'" % tag for tag in diff_tags]
      diff_names.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")


class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not result:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, node_result in result.items():
        node_result.Raise()
        if not node_result.data:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, node_result.data))


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has three sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  _RELO_KEYS = [
    "relocate_from",
    ]

  def __init__(self, lu, mode, name, **kwargs):
    self.lu = lu
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.lu.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": 1,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor

    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
                                           hypervisor_name)
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
                       cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        }

      if not ninfo.offline:
        nresult.Raise()
        if not isinstance(nresult.data, dict):
          raise errors.OpExecError("Can't get data for node %s" % nname)
        remote_info = nresult.data
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          try:
            remote_info[attr] = int(remote_info[attr])
          except ValueError, err:
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" % (nname, attr, err))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].data:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].data[iinfo.name]['memory'])
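            # adjust the node's free memory as if this instance used its
            # full configured memory rather than its current usage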
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
                  for n in iinfo.nics]
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

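    # network-mirrored disk templates (e.g. DRBD) need a primary and a
    # secondary node, all other templates need a single node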
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node")

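    # relocation only replaces the (single) secondary node, so exactly one
    # node is requested from the allocator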
    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

  def _BuildInputData(self):
    """Build input data structures.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner
    data = self.in_text

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
    result.Raise()

    if not isinstance(result.data, (list, tuple)) or len(result.data) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

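    # the runner returns (status code, stdout, stderr, failure reason)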
    rcode, stdout, stderr, fail = result.data

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode
    of the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result