root / lib / cmdlib.py @ a5728081


1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module implementing the master-side code."""
23

    
24
# pylint: disable-msg=W0613,W0201
25

    
26
import os
27
import os.path
28
import sha
29
import time
30
import tempfile
31
import re
32
import platform
33
import logging
34
import copy
35
import random
36

    
37
from ganeti import ssh
38
from ganeti import utils
39
from ganeti import errors
40
from ganeti import hypervisor
41
from ganeti import locking
42
from ganeti import constants
43
from ganeti import objects
44
from ganeti import opcodes
45
from ganeti import serializer
46
from ganeti import ssconf
47

    
48

    
49
class LogicalUnit(object):
50
  """Logical Unit base class.
51

52
  Subclasses must follow these rules:
53
    - implement ExpandNames
54
    - implement CheckPrereq
55
    - implement Exec
56
    - implement BuildHooksEnv
57
    - redefine HPATH and HTYPE
58
    - optionally redefine their run requirements:
59
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
60

61
  Note that all commands require root permissions.
62

63
  """
64
  HPATH = None
65
  HTYPE = None
66
  _OP_REQP = []
67
  REQ_BGL = True
68

    
69
  def __init__(self, processor, op, context, rpc):
70
    """Constructor for LogicalUnit.
71

72
    This needs to be overridden in derived classes in order to check op
73
    validity.
74

75
    """
76
    self.proc = processor
77
    self.op = op
78
    self.cfg = context.cfg
79
    self.context = context
80
    self.rpc = rpc
81
    # Dicts used to declare locking needs to mcpu
82
    self.needed_locks = None
83
    self.acquired_locks = {}
84
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
85
    self.add_locks = {}
86
    self.remove_locks = {}
87
    # Used to force good behavior when calling helper functions
88
    self.recalculate_locks = {}
89
    self.__ssh = None
90
    # logging
91
    self.LogWarning = processor.LogWarning
92
    self.LogInfo = processor.LogInfo
93

    
94
    for attr_name in self._OP_REQP:
95
      attr_val = getattr(op, attr_name, None)
96
      if attr_val is None:
97
        raise errors.OpPrereqError("Required parameter '%s' missing" %
98
                                   attr_name)
99
    self.CheckArguments()
100

    
101
  def __GetSSH(self):
102
    """Returns the SshRunner object
103

104
    """
105
    if not self.__ssh:
106
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
107
    return self.__ssh
108

    
109
  ssh = property(fget=__GetSSH)
110

    
111
  def CheckArguments(self):
112
    """Check syntactic validity for the opcode arguments.
113

114
    This method is for doing a simple syntactic check and ensuring the
115
    validity of opcode parameters, without any cluster-related
116
    checks. While the same can be accomplished in ExpandNames and/or
117
    CheckPrereq, doing these separately is better because:
118

119
      - ExpandNames is left purely as a lock-related function
120
      - CheckPrereq is run after we have acquired locks (and possibly
121
        waited for them)
122

123
    The function is allowed to change the self.op attribute so that
124
    later methods need no longer worry about missing parameters.
125

126
    """
127
    pass
128

    
129
  def ExpandNames(self):
130
    """Expand names for this LU.
131

132
    This method is called before starting to execute the opcode, and it should
133
    update all the parameters of the opcode to their canonical form (e.g. a
134
    short node name must be fully expanded after this method has successfully
135
    completed). This way locking, hooks, logging, etc. can work correctly.
136

137
    LUs which implement this method must also populate the self.needed_locks
138
    member, as a dict with lock levels as keys, and a list of needed lock names
139
    as values. Rules:
140

141
      - use an empty dict if you don't need any lock
142
      - if you don't need any lock at a particular level omit that level
143
      - don't put anything for the BGL level
144
      - if you want all locks at a level use locking.ALL_SET as a value
145

146
    If you need to share locks (rather than acquire them exclusively) at one
147
    level you can modify self.share_locks, setting a true value (usually 1) for
148
    that level. By default locks are not shared.
149

150
    Examples::
151

152
      # Acquire all nodes and one instance
153
      self.needed_locks = {
154
        locking.LEVEL_NODE: locking.ALL_SET,
155
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
156
      }
157
      # Acquire just two nodes
158
      self.needed_locks = {
159
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
160
      }
161
      # Acquire no locks
162
      self.needed_locks = {} # No, you can't leave it to the default value None
163

164
    """
165
    # The implementation of this method is mandatory only if the new LU is
166
    # concurrent, so that old LUs don't need to be changed all at the same
167
    # time.
168
    if self.REQ_BGL:
169
      self.needed_locks = {} # Exclusive LUs don't need locks.
170
    else:
171
      raise NotImplementedError
172

    
173
  def DeclareLocks(self, level):
174
    """Declare LU locking needs for a level
175

176
    While most LUs can just declare their locking needs at ExpandNames time,
177
    sometimes there's the need to calculate some locks after having acquired
178
    the ones before. This function is called just before acquiring locks at a
179
    particular level, but after acquiring the ones at lower levels, and permits
180
    such calculations. It can be used to modify self.needed_locks, and by
181
    default it does nothing.
182

183
    This function is only called if you have something already set in
184
    self.needed_locks for the level.
185

186
    @param level: Locking level which is going to be locked
187
    @type level: member of ganeti.locking.LEVELS
188

189
    """
190

    
191
  def CheckPrereq(self):
192
    """Check prerequisites for this LU.
193

194
    This method should check that the prerequisites for the execution
195
    of this LU are fulfilled. It can do internode communication, but
196
    it should be idempotent - no cluster or system changes are
197
    allowed.
198

199
    The method should raise errors.OpPrereqError in case something is
200
    not fulfilled. Its return value is ignored.
201

202
    This method should also update all the parameters of the opcode to
203
    their canonical form if it hasn't been done by ExpandNames before.
204

205
    """
206
    raise NotImplementedError
207

    
208
  def Exec(self, feedback_fn):
209
    """Execute the LU.
210

211
    This method should implement the actual work. It should raise
212
    errors.OpExecError for failures that are somewhat dealt with in
213
    code, or expected.
214

215
    """
216
    raise NotImplementedError
217

    
218
  def BuildHooksEnv(self):
219
    """Build hooks environment for this LU.
220

221
    This method should return a three-element tuple consisting of: a dict
222
    containing the environment that will be used for running the
223
    specific hook for this LU, a list of node names on which the hook
224
    should run before the execution, and a list of node names on which
225
    the hook should run after the execution.
226

227
    The keys of the dict must not be prefixed with 'GANETI_', as this will
228
    be handled in the hooks runner. Also note additional keys will be
229
    added by the hooks runner. If the LU doesn't define any
230
    environment, an empty dict (and not None) should be returned.
231

232
    If there are no nodes, an empty list (and not None) should be returned.
233

234
    Note that if the HPATH for a LU class is None, this function will
235
    not be called.
236

237
    """
238
    raise NotImplementedError
239

    
240
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
241
    """Notify the LU about the results of its hooks.
242

243
    This method is called every time a hooks phase is executed, and notifies
244
    the Logical Unit about the hooks' result. The LU can then use it to alter
245
    its result based on the hooks.  By default the method does nothing and the
246
    previous result is passed back unchanged but any LU can define it if it
247
    wants to use the local cluster hook-scripts somehow.
248

249
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
250
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
251
    @param hook_results: the results of the multi-node hooks rpc call
252
    @param feedback_fn: function used to send feedback back to the caller
253
    @param lu_result: the previous Exec result this LU had, or None
254
        in the PRE phase
255
    @return: the new Exec result, based on the previous result
256
        and hook results
257

258
    """
259
    return lu_result
260

    
261
  def _ExpandAndLockInstance(self):
262
    """Helper function to expand and lock an instance.
263

264
    Many LUs that work on an instance take its name in self.op.instance_name
265
    and need to expand it and then declare the expanded name for locking. This
266
    function does it, and then updates self.op.instance_name to the expanded
267
    name. It also initializes needed_locks as a dict, if this hasn't been done
268
    before.
269

270
    """
271
    if self.needed_locks is None:
272
      self.needed_locks = {}
273
    else:
274
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
275
        "_ExpandAndLockInstance called with instance-level locks set"
276
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
277
    if expanded_name is None:
278
      raise errors.OpPrereqError("Instance '%s' not known" %
279
                                  self.op.instance_name)
280
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
281
    self.op.instance_name = expanded_name
282

    
283
  def _LockInstancesNodes(self, primary_only=False):
284
    """Helper function to declare instances' nodes for locking.
285

286
    This function should be called after locking one or more instances to lock
287
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
288
    with all primary or secondary nodes for instances already locked and
289
    present in self.needed_locks[locking.LEVEL_INSTANCE].
290

291
    It should be called from DeclareLocks, and for safety only works if
292
    self.recalculate_locks[locking.LEVEL_NODE] is set.
293

294
    In the future it may grow parameters to just lock some instance's nodes, or
295
    to just lock primary or secondary nodes, if needed.
296

297
    It should be called in DeclareLocks in a way similar to::
298

299
      if level == locking.LEVEL_NODE:
300
        self._LockInstancesNodes()
301

302
    @type primary_only: boolean
303
    @param primary_only: only lock primary nodes of locked instances
304

305
    """
306
    assert locking.LEVEL_NODE in self.recalculate_locks, \
307
      "_LockInstancesNodes helper function called with no nodes to recalculate"
308

    
309
    # TODO: check if we've really been called with the instance locks held
310

    
311
    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
312
    # future we might want to have different behaviors depending on the value
313
    # of self.recalculate_locks[locking.LEVEL_NODE]
314
    wanted_nodes = []
315
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
316
      instance = self.context.cfg.GetInstanceInfo(instance_name)
317
      wanted_nodes.append(instance.primary_node)
318
      if not primary_only:
319
        wanted_nodes.extend(instance.secondary_nodes)
320

    
321
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
322
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
323
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
324
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
325

    
326
    del self.recalculate_locks[locking.LEVEL_NODE]
327

    
328

    
329
class NoHooksLU(LogicalUnit):
330
  """Simple LU which runs no hooks.
331

332
  This LU is intended as a parent for other LogicalUnits which will
333
  run no hooks, in order to reduce duplicate code.
334

335
  """
336
  HPATH = None
337
  HTYPE = None
338

    
339
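# Illustrative sketch, not part of the original module: a minimal concurrent
# LU following the contract documented in LogicalUnit above.  The class name
# and the opcode's "instance_name" attribute are hypothetical; the helpers and
# constants it uses (_ExpandAndLockInstance, _LockInstancesNodes,
# locking.LEVEL_NODE, constants.LOCKS_REPLACE) are the ones defined in this
# module.
#
#   class LUExampleInstanceQuery(NoHooksLU):
#     _OP_REQP = ["instance_name"]
#     REQ_BGL = False
#
#     def ExpandNames(self):
#       # expand the instance name and declare the instance-level lock
#       self._ExpandAndLockInstance()
#       # node locks are computed later, once the instance lock is held
#       self.needed_locks[locking.LEVEL_NODE] = []
#       self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
#
#     def DeclareLocks(self, level):
#       if level == locking.LEVEL_NODE:
#         self._LockInstancesNodes()
#
#     def CheckPrereq(self):
#       self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
#
#     def Exec(self, feedback_fn):
#       return (self.instance.primary_node, self.instance.secondary_nodes)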

    
340
def _GetWantedNodes(lu, nodes):
341
  """Returns list of checked and expanded node names.
342

343
  @type lu: L{LogicalUnit}
344
  @param lu: the logical unit on whose behalf we execute
345
  @type nodes: list
346
  @param nodes: list of node names or None for all nodes
347
  @rtype: list
348
  @return: the list of nodes, sorted
349
  @raise errors.OpProgrammerError: if the nodes parameter is wrong type
350

351
  """
352
  if not isinstance(nodes, list):
353
    raise errors.OpPrereqError("Invalid argument type 'nodes'")
354

    
355
  if not nodes:
356
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
357
      " non-empty list of nodes whose name is to be expanded.")
358

    
359
  wanted = []
360
  for name in nodes:
361
    node = lu.cfg.ExpandNodeName(name)
362
    if node is None:
363
      raise errors.OpPrereqError("No such node name '%s'" % name)
364
    wanted.append(node)
365

    
366
  return utils.NiceSort(wanted)
367

    
368

    
369
def _GetWantedInstances(lu, instances):
370
  """Returns list of checked and expanded instance names.
371

372
  @type lu: L{LogicalUnit}
373
  @param lu: the logical unit on whose behalf we execute
374
  @type instances: list
375
  @param instances: list of instance names or None for all instances
376
  @rtype: list
377
  @return: the list of instances, sorted
378
  @raise errors.OpPrereqError: if the instances parameter is wrong type
379
  @raise errors.OpPrereqError: if any of the passed instances is not found
380

381
  """
382
  if not isinstance(instances, list):
383
    raise errors.OpPrereqError("Invalid argument type 'instances'")
384

    
385
  if instances:
386
    wanted = []
387

    
388
    for name in instances:
389
      instance = lu.cfg.ExpandInstanceName(name)
390
      if instance is None:
391
        raise errors.OpPrereqError("No such instance name '%s'" % name)
392
      wanted.append(instance)
393

    
394
  else:
395
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
396
  return wanted
397

    
398

    
399
def _CheckOutputFields(static, dynamic, selected):
400
  """Checks whether all selected fields are valid.
401

402
  @type static: L{utils.FieldSet}
403
  @param static: static fields set
404
  @type dynamic: L{utils.FieldSet}
405
  @param dynamic: dynamic fields set
406

407
  """
408
  f = utils.FieldSet()
409
  f.Extend(static)
410
  f.Extend(dynamic)
411

    
412
  delta = f.NonMatching(selected)
413
  if delta:
414
    raise errors.OpPrereqError("Unknown output fields selected: %s"
415
                               % ",".join(delta))
416

    
417
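# Illustrative usage sketch (the field names are hypothetical): a query LU
# would typically validate the user-requested output fields like this,
# raising OpPrereqError for any field outside the two sets:
#
#   _CheckOutputFields(static=utils.FieldSet("name", "pinst_cnt"),
#                      dynamic=utils.FieldSet("dfree", "mfree"),
#                      selected=self.op.output_fields)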

    
418
def _CheckBooleanOpField(op, name):
419
  """Validates boolean opcode parameters.
420

421
  This will ensure that an opcode parameter is either a boolean value,
422
  or None (but that it always exists).
423

424
  """
425
  val = getattr(op, name, None)
426
  if not (val is None or isinstance(val, bool)):
427
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
428
                               (name, str(val)))
429
  setattr(op, name, val)
430

    
431

    
432
def _CheckNodeOnline(lu, node):
433
  """Ensure that a given node is online.
434

435
  @param lu: the LU on behalf of which we make the check
436
  @param node: the node to check
437
  @raise errors.OpPrereqError: if the node is offline
438

439
  """
440
  if lu.cfg.GetNodeInfo(node).offline:
441
    raise errors.OpPrereqError("Can't use offline node %s" % node)
442

    
443

    
444
def _CheckNodeNotDrained(lu, node):
445
  """Ensure that a given node is not drained.
446

447
  @param lu: the LU on behalf of which we make the check
448
  @param node: the node to check
449
  @raise errors.OpPrereqError: if the node is drained
450

451
  """
452
  if lu.cfg.GetNodeInfo(node).drained:
453
    raise errors.OpPrereqError("Can't use drained node %s" % node)
454

    
455

    
456
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
457
                          memory, vcpus, nics):
458
  """Builds instance related env variables for hooks
459

460
  This builds the hook environment from individual variables.
461

462
  @type name: string
463
  @param name: the name of the instance
464
  @type primary_node: string
465
  @param primary_node: the name of the instance's primary node
466
  @type secondary_nodes: list
467
  @param secondary_nodes: list of secondary nodes as strings
468
  @type os_type: string
469
  @param os_type: the name of the instance's OS
470
  @type status: boolean
471
  @param status: the should_run status of the instance
472
  @type memory: string
473
  @param memory: the memory size of the instance
474
  @type vcpus: string
475
  @param vcpus: the count of VCPUs the instance has
476
  @type nics: list
477
  @param nics: list of tuples (ip, bridge, mac) representing
478
      the NICs the instance has
479
  @rtype: dict
480
  @return: the hook environment for this instance
481

482
  """
483
  if status:
484
    str_status = "up"
485
  else:
486
    str_status = "down"
487
  env = {
488
    "OP_TARGET": name,
489
    "INSTANCE_NAME": name,
490
    "INSTANCE_PRIMARY": primary_node,
491
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
492
    "INSTANCE_OS_TYPE": os_type,
493
    "INSTANCE_STATUS": str_status,
494
    "INSTANCE_MEMORY": memory,
495
    "INSTANCE_VCPUS": vcpus,
496
  }
497

    
498
  if nics:
499
    nic_count = len(nics)
500
    for idx, (ip, bridge, mac) in enumerate(nics):
501
      if ip is None:
502
        ip = ""
503
      env["INSTANCE_NIC%d_IP" % idx] = ip
504
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
505
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
506
  else:
507
    nic_count = 0
508

    
509
  env["INSTANCE_NIC_COUNT"] = nic_count
510

    
511
  return env
512

    
513
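# Illustrative example with hypothetical values (instance, node and bridge
# names below are made up): for an instance with one secondary node and a
# single NIC, _BuildInstanceHookEnv above would return roughly:
#
#   {
#     "OP_TARGET": "inst1.example.com",
#     "INSTANCE_NAME": "inst1.example.com",
#     "INSTANCE_PRIMARY": "node1.example.com",
#     "INSTANCE_SECONDARIES": "node2.example.com",
#     "INSTANCE_OS_TYPE": "debian-etch",
#     "INSTANCE_STATUS": "up",
#     "INSTANCE_MEMORY": 128,
#     "INSTANCE_VCPUS": 1,
#     "INSTANCE_NIC0_IP": "",
#     "INSTANCE_NIC0_BRIDGE": "xen-br0",
#     "INSTANCE_NIC0_HWADDR": "aa:00:00:11:22:33",
#     "INSTANCE_NIC_COUNT": 1,
#   }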

    
514
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
515
  """Builds instance related env variables for hooks from an object.
516

517
  @type lu: L{LogicalUnit}
518
  @param lu: the logical unit on whose behalf we execute
519
  @type instance: L{objects.Instance}
520
  @param instance: the instance for which we should build the
521
      environment
522
  @type override: dict
523
  @param override: dictionary with key/values that will override
524
      our values
525
  @rtype: dict
526
  @return: the hook environment dictionary
527

528
  """
529
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
530
  args = {
531
    'name': instance.name,
532
    'primary_node': instance.primary_node,
533
    'secondary_nodes': instance.secondary_nodes,
534
    'os_type': instance.os,
535
    'status': instance.admin_up,
536
    'memory': bep[constants.BE_MEMORY],
537
    'vcpus': bep[constants.BE_VCPUS],
538
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
539
  }
540
  if override:
541
    args.update(override)
542
  return _BuildInstanceHookEnv(**args)
543

    
544

    
545
def _AdjustCandidatePool(lu):
546
  """Adjust the candidate pool after node operations.
547

548
  """
549
  mod_list = lu.cfg.MaintainCandidatePool()
550
  if mod_list:
551
    lu.LogInfo("Promoted nodes to master candidate role: %s",
552
               ", ".join(node.name for node in mod_list))
553
    for name in mod_list:
554
      lu.context.ReaddNode(name)
555
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
556
  if mc_now > mc_max:
557
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
558
               (mc_now, mc_max))
559

    
560

    
561
def _CheckInstanceBridgesExist(lu, instance):
562
  """Check that the brigdes needed by an instance exist.
563

564
  """
565
  # check bridges existence
566
  brlist = [nic.bridge for nic in instance.nics]
567
  result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
568
  result.Raise()
569
  if not result.data:
570
    raise errors.OpPrereqError("One or more target bridges %s does not"
571
                               " exist on destination node '%s'" %
572
                               (brlist, instance.primary_node))
573

    
574

    
575
class LUDestroyCluster(NoHooksLU):
576
  """Logical unit for destroying the cluster.
577

578
  """
579
  _OP_REQP = []
580

    
581
  def CheckPrereq(self):
582
    """Check prerequisites.
583

584
    This checks whether the cluster is empty.
585

586
    Any errors are signalled by raising errors.OpPrereqError.
587

588
    """
589
    master = self.cfg.GetMasterNode()
590

    
591
    nodelist = self.cfg.GetNodeList()
592
    if len(nodelist) != 1 or nodelist[0] != master:
593
      raise errors.OpPrereqError("There are still %d node(s) in"
594
                                 " this cluster." % (len(nodelist) - 1))
595
    instancelist = self.cfg.GetInstanceList()
596
    if instancelist:
597
      raise errors.OpPrereqError("There are still %d instance(s) in"
598
                                 " this cluster." % len(instancelist))
599

    
600
  def Exec(self, feedback_fn):
601
    """Destroys the cluster.
602

603
    """
604
    master = self.cfg.GetMasterNode()
605
    result = self.rpc.call_node_stop_master(master, False)
606
    result.Raise()
607
    if not result.data:
608
      raise errors.OpExecError("Could not disable the master role")
609
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
610
    utils.CreateBackup(priv_key)
611
    utils.CreateBackup(pub_key)
612
    return master
613

    
614

    
615
class LUVerifyCluster(LogicalUnit):
616
  """Verifies the cluster status.
617

618
  """
619
  HPATH = "cluster-verify"
620
  HTYPE = constants.HTYPE_CLUSTER
621
  _OP_REQP = ["skip_checks"]
622
  REQ_BGL = False
623

    
624
  def ExpandNames(self):
625
    self.needed_locks = {
626
      locking.LEVEL_NODE: locking.ALL_SET,
627
      locking.LEVEL_INSTANCE: locking.ALL_SET,
628
    }
629
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
630

    
631
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
632
                  node_result, feedback_fn, master_files,
633
                  drbd_map):
634
    """Run multiple tests against a node.
635

636
    Test list:
637

638
      - compares ganeti version
639
      - checks vg existence and size > 20G
640
      - checks config file checksum
641
      - checks ssh to other nodes
642

643
    @type nodeinfo: L{objects.Node}
644
    @param nodeinfo: the node to check
645
    @param file_list: required list of files
646
    @param local_cksum: dictionary of local files and their checksums
647
    @param node_result: the results from the node
648
    @param feedback_fn: function used to accumulate results
649
    @param master_files: list of files that only masters should have
650
    @param drbd_map: the used drbd minors for this node, in
651
        the form of minor: (instance, must_exist), which correspond to instances
652
        and their running status
653

654
    """
655
    node = nodeinfo.name
656

    
657
    # main result, node_result should be a non-empty dict
658
    if not node_result or not isinstance(node_result, dict):
659
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
660
      return True
661

    
662
    # compares ganeti version
663
    local_version = constants.PROTOCOL_VERSION
664
    remote_version = node_result.get('version', None)
665
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
666
            len(remote_version) == 2):
667
      feedback_fn("  - ERROR: connection to %s failed" % (node))
668
      return True
669

    
670
    if local_version != remote_version[0]:
671
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
672
                  " node %s %s" % (local_version, node, remote_version[0]))
673
      return True
674

    
675
    # node seems compatible, we can actually try to look into its results
676

    
677
    bad = False
678

    
679
    # full package version
680
    if constants.RELEASE_VERSION != remote_version[1]:
681
      feedback_fn("  - WARNING: software version mismatch: master %s,"
682
                  " node %s %s" %
683
                  (constants.RELEASE_VERSION, node, remote_version[1]))
684

    
685
    # checks vg existence and size > 20G
686

    
687
    vglist = node_result.get(constants.NV_VGLIST, None)
688
    if not vglist:
689
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
690
                      (node,))
691
      bad = True
692
    else:
693
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
694
                                            constants.MIN_VG_SIZE)
695
      if vgstatus:
696
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
697
        bad = True
698

    
699
    # checks config file checksum
700

    
701
    remote_cksum = node_result.get(constants.NV_FILELIST, None)
702
    if not isinstance(remote_cksum, dict):
703
      bad = True
704
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
705
    else:
706
      for file_name in file_list:
707
        node_is_mc = nodeinfo.master_candidate
708
        must_have_file = file_name not in master_files
709
        if file_name not in remote_cksum:
710
          if node_is_mc or must_have_file:
711
            bad = True
712
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
713
        elif remote_cksum[file_name] != local_cksum[file_name]:
714
          if node_is_mc or must_have_file:
715
            bad = True
716
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
717
          else:
718
            # not candidate and this is not a must-have file
719
            bad = True
720
            feedback_fn("  - ERROR: non master-candidate has old/wrong file"
721
                        " '%s'" % file_name)
722
        else:
723
          # all good, except non-master/non-must have combination
724
          if not node_is_mc and not must_have_file:
725
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
726
                        " candidates" % file_name)
727

    
728
    # checks ssh to any
729

    
730
    if constants.NV_NODELIST not in node_result:
731
      bad = True
732
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
733
    else:
734
      if node_result[constants.NV_NODELIST]:
735
        bad = True
736
        for node in node_result[constants.NV_NODELIST]:
737
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
738
                          (node, node_result[constants.NV_NODELIST][node]))
739

    
740
    if constants.NV_NODENETTEST not in node_result:
741
      bad = True
742
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
743
    else:
744
      if node_result[constants.NV_NODENETTEST]:
745
        bad = True
746
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
747
        for node in nlist:
748
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
749
                          (node, node_result[constants.NV_NODENETTEST][node]))
750

    
751
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
752
    if isinstance(hyp_result, dict):
753
      for hv_name, hv_result in hyp_result.iteritems():
754
        if hv_result is not None:
755
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
756
                      (hv_name, hv_result))
757

    
758
    # check used drbd list
759
    used_minors = node_result.get(constants.NV_DRBDLIST, [])
760
    for minor, (iname, must_exist) in drbd_map.items():
761
      if minor not in used_minors and must_exist:
762
        feedback_fn("  - ERROR: drbd minor %d of instance %s is not active" %
763
                    (minor, iname))
764
        bad = True
765
    for minor in used_minors:
766
      if minor not in drbd_map:
767
        feedback_fn("  - ERROR: unallocated drbd minor %d is in use" % minor)
768
        bad = True
769

    
770
    return bad
771

    
772
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
773
                      node_instance, feedback_fn, n_offline):
774
    """Verify an instance.
775

776
    This function checks to see if the required block devices are
777
    available on the instance's node.
778

779
    """
780
    bad = False
781

    
782
    node_current = instanceconfig.primary_node
783

    
784
    node_vol_should = {}
785
    instanceconfig.MapLVsByNode(node_vol_should)
786

    
787
    for node in node_vol_should:
788
      if node in n_offline:
789
        # ignore missing volumes on offline nodes
790
        continue
791
      for volume in node_vol_should[node]:
792
        if node not in node_vol_is or volume not in node_vol_is[node]:
793
          feedback_fn("  - ERROR: volume %s missing on node %s" %
794
                          (volume, node))
795
          bad = True
796

    
797
    if instanceconfig.admin_up:
798
      if ((node_current not in node_instance or
799
          not instance in node_instance[node_current]) and
800
          node_current not in n_offline):
801
        feedback_fn("  - ERROR: instance %s not running on node %s" %
802
                        (instance, node_current))
803
        bad = True
804

    
805
    for node in node_instance:
806
      if (not node == node_current):
807
        if instance in node_instance[node]:
808
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
809
                          (instance, node))
810
          bad = True
811

    
812
    return bad
813

    
814
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
815
    """Verify if there are any unknown volumes in the cluster.
816

817
    The .os, .swap and backup volumes are ignored. All other volumes are
818
    reported as unknown.
819

820
    """
821
    bad = False
822

    
823
    for node in node_vol_is:
824
      for volume in node_vol_is[node]:
825
        if node not in node_vol_should or volume not in node_vol_should[node]:
826
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
827
                      (volume, node))
828
          bad = True
829
    return bad
830

    
831
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
832
    """Verify the list of running instances.
833

834
    This checks what instances are running but unknown to the cluster.
835

836
    """
837
    bad = False
838
    for node in node_instance:
839
      for runninginstance in node_instance[node]:
840
        if runninginstance not in instancelist:
841
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
842
                          (runninginstance, node))
843
          bad = True
844
    return bad
845

    
846
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
847
    """Verify N+1 Memory Resilience.
848

849
    Check that if one single node dies we can still start all the instances it
850
    was primary for.
851

852
    """
853
    bad = False
854

    
855
    for node, nodeinfo in node_info.iteritems():
856
      # This code checks that every node which is now listed as secondary has
857
      # enough memory to host all instances it is supposed to, should a single
858
      # other node in the cluster fail.
859
      # FIXME: not ready for failover to an arbitrary node
860
      # FIXME: does not support file-backed instances
861
      # WARNING: we currently take into account down instances as well as up
862
      # ones, considering that even if they're down someone might want to start
863
      # them even in the event of a node failure.
864
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
865
        needed_mem = 0
866
        for instance in instances:
867
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
868
          if bep[constants.BE_AUTO_BALANCE]:
869
            needed_mem += bep[constants.BE_MEMORY]
870
        if nodeinfo['mfree'] < needed_mem:
871
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
872
                      " failovers should node %s fail" % (node, prinode))
873
          bad = True
874
    return bad
875
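  # Illustrative example of the N+1 check above, with hypothetical numbers:
  # if node C has mfree=1024 MB and is secondary for the auto-balanced
  # instances i1 (BE_MEMORY=512) and i2 (BE_MEMORY=768), both primary on
  # node A, then needed_mem for the (C, A) pair is 1280 MB > 1024 MB, so a
  # failure of node A could not be absorbed by node C and an error is
  # reported.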

    
876
  def CheckPrereq(self):
877
    """Check prerequisites.
878

879
    Transform the list of checks we're going to skip into a set and check that
880
    all its members are valid.
881

882
    """
883
    self.skip_set = frozenset(self.op.skip_checks)
884
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
885
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
886

    
887
  def BuildHooksEnv(self):
888
    """Build hooks env.
889

890
    Cluster-Verify hooks are just run in the post phase, and their failure causes
891
    the output to be logged in the verify output and the verification to fail.
892

893
    """
894
    all_nodes = self.cfg.GetNodeList()
895
    # TODO: populate the environment with useful information for verify hooks
896
    env = {}
897
    return env, [], all_nodes
898

    
899
  def Exec(self, feedback_fn):
900
    """Verify integrity of cluster, performing various test on nodes.
901

902
    """
903
    bad = False
904
    feedback_fn("* Verifying global settings")
905
    for msg in self.cfg.VerifyConfig():
906
      feedback_fn("  - ERROR: %s" % msg)
907

    
908
    vg_name = self.cfg.GetVGName()
909
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
910
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
911
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
912
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
913
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
914
                        for iname in instancelist)
915
    i_non_redundant = [] # Non redundant instances
916
    i_non_a_balanced = [] # Non auto-balanced instances
917
    n_offline = [] # List of offline nodes
918
    n_drained = [] # List of nodes being drained
919
    node_volume = {}
920
    node_instance = {}
921
    node_info = {}
922
    instance_cfg = {}
923

    
924
    # FIXME: verify OS list
925
    # do local checksums
926
    master_files = [constants.CLUSTER_CONF_FILE]
927

    
928
    file_names = ssconf.SimpleStore().GetFileList()
929
    file_names.append(constants.SSL_CERT_FILE)
930
    file_names.append(constants.RAPI_CERT_FILE)
931
    file_names.extend(master_files)
932

    
933
    local_checksums = utils.FingerprintFiles(file_names)
934

    
935
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
936
    node_verify_param = {
937
      constants.NV_FILELIST: file_names,
938
      constants.NV_NODELIST: [node.name for node in nodeinfo
939
                              if not node.offline],
940
      constants.NV_HYPERVISOR: hypervisors,
941
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
942
                                  node.secondary_ip) for node in nodeinfo
943
                                 if not node.offline],
944
      constants.NV_LVLIST: vg_name,
945
      constants.NV_INSTANCELIST: hypervisors,
946
      constants.NV_VGLIST: None,
947
      constants.NV_VERSION: None,
948
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
949
      constants.NV_DRBDLIST: None,
950
      }
951
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
952
                                           self.cfg.GetClusterName())
953

    
954
    cluster = self.cfg.GetClusterInfo()
955
    master_node = self.cfg.GetMasterNode()
956
    all_drbd_map = self.cfg.ComputeDRBDMap()
957

    
958
    for node_i in nodeinfo:
959
      node = node_i.name
960
      nresult = all_nvinfo[node].data
961

    
962
      if node_i.offline:
963
        feedback_fn("* Skipping offline node %s" % (node,))
964
        n_offline.append(node)
965
        continue
966

    
967
      if node == master_node:
968
        ntype = "master"
969
      elif node_i.master_candidate:
970
        ntype = "master candidate"
971
      elif node_i.drained:
972
        ntype = "drained"
973
        n_drained.append(node)
974
      else:
975
        ntype = "regular"
976
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))
977

    
978
      if all_nvinfo[node].failed or not isinstance(nresult, dict):
979
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
980
        bad = True
981
        continue
982

    
983
      node_drbd = {}
984
      for minor, instance in all_drbd_map[node].items():
985
        instance = instanceinfo[instance]
986
        node_drbd[minor] = (instance.name, instance.admin_up)
987
      result = self._VerifyNode(node_i, file_names, local_checksums,
988
                                nresult, feedback_fn, master_files,
989
                                node_drbd)
990
      bad = bad or result
991

    
992
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
993
      if isinstance(lvdata, basestring):
994
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
995
                    (node, utils.SafeEncode(lvdata)))
996
        bad = True
997
        node_volume[node] = {}
998
      elif not isinstance(lvdata, dict):
999
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
1000
        bad = True
1001
        continue
1002
      else:
1003
        node_volume[node] = lvdata
1004

    
1005
      # node_instance
1006
      idata = nresult.get(constants.NV_INSTANCELIST, None)
1007
      if not isinstance(idata, list):
1008
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
1009
                    (node,))
1010
        bad = True
1011
        continue
1012

    
1013
      node_instance[node] = idata
1014

    
1015
      # node_info
1016
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1017
      if not isinstance(nodeinfo, dict):
1018
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
1019
        bad = True
1020
        continue
1021

    
1022
      try:
1023
        node_info[node] = {
1024
          "mfree": int(nodeinfo['memory_free']),
1025
          "dfree": int(nresult[constants.NV_VGLIST][vg_name]),
1026
          "pinst": [],
1027
          "sinst": [],
1028
          # dictionary holding all instances this node is secondary for,
1029
          # grouped by their primary node. Each key is a cluster node, and each
1030
          # value is a list of instances which have the key as primary and the
1031
          # current node as secondary.  this is handy to calculate N+1 memory
1032
          # availability if you can only failover from a primary to its
1033
          # secondary.
1034
          "sinst-by-pnode": {},
1035
        }
1036
      except ValueError:
1037
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
1038
        bad = True
1039
        continue
1040

    
1041
    node_vol_should = {}
1042

    
1043
    for instance in instancelist:
1044
      feedback_fn("* Verifying instance %s" % instance)
1045
      inst_config = instanceinfo[instance]
1046
      result =  self._VerifyInstance(instance, inst_config, node_volume,
1047
                                     node_instance, feedback_fn, n_offline)
1048
      bad = bad or result
1049
      inst_nodes_offline = []
1050

    
1051
      inst_config.MapLVsByNode(node_vol_should)
1052

    
1053
      instance_cfg[instance] = inst_config
1054

    
1055
      pnode = inst_config.primary_node
1056
      if pnode in node_info:
1057
        node_info[pnode]['pinst'].append(instance)
1058
      elif pnode not in n_offline:
1059
        feedback_fn("  - ERROR: instance %s, connection to primary node"
1060
                    " %s failed" % (instance, pnode))
1061
        bad = True
1062

    
1063
      if pnode in n_offline:
1064
        inst_nodes_offline.append(pnode)
1065

    
1066
      # If the instance is non-redundant we cannot survive losing its primary
1067
      # node, so we are not N+1 compliant. On the other hand we have no disk
1068
      # templates with more than one secondary so that situation is not well
1069
      # supported either.
1070
      # FIXME: does not support file-backed instances
1071
      if len(inst_config.secondary_nodes) == 0:
1072
        i_non_redundant.append(instance)
1073
      elif len(inst_config.secondary_nodes) > 1:
1074
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
1075
                    % instance)
1076

    
1077
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1078
        i_non_a_balanced.append(instance)
1079

    
1080
      for snode in inst_config.secondary_nodes:
1081
        if snode in node_info:
1082
          node_info[snode]['sinst'].append(instance)
1083
          if pnode not in node_info[snode]['sinst-by-pnode']:
1084
            node_info[snode]['sinst-by-pnode'][pnode] = []
1085
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1086
        elif snode not in n_offline:
1087
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
1088
                      " %s failed" % (instance, snode))
1089
          bad = True
1090
        if snode in n_offline:
1091
          inst_nodes_offline.append(snode)
1092

    
1093
      if inst_nodes_offline:
1094
        # warn that the instance lives on offline nodes, and set bad=True
1095
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
1096
                    ", ".join(inst_nodes_offline))
1097
        bad = True
1098

    
1099
    feedback_fn("* Verifying orphan volumes")
1100
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1101
                                       feedback_fn)
1102
    bad = bad or result
1103

    
1104
    feedback_fn("* Verifying remaining instances")
1105
    result = self._VerifyOrphanInstances(instancelist, node_instance,
1106
                                         feedback_fn)
1107
    bad = bad or result
1108

    
1109
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1110
      feedback_fn("* Verifying N+1 Memory redundancy")
1111
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1112
      bad = bad or result
1113

    
1114
    feedback_fn("* Other Notes")
1115
    if i_non_redundant:
1116
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1117
                  % len(i_non_redundant))
1118

    
1119
    if i_non_a_balanced:
1120
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1121
                  % len(i_non_a_balanced))
1122

    
1123
    if n_offline:
1124
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1125

    
1126
    if n_drained:
1127
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
1128

    
1129
    return not bad
1130

    
1131
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
1132
    """Analize the post-hooks' result
1133

1134
    This method analyses the hook result, handles it, and sends some
1135
    nicely-formatted feedback back to the user.
1136

1137
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
1138
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
1139
    @param hooks_results: the results of the multi-node hooks rpc call
1140
    @param feedback_fn: function used to send feedback back to the caller
1141
    @param lu_result: previous Exec result
1142
    @return: the new Exec result, based on the previous result
1143
        and hook results
1144

1145
    """
1146
    # We only really run POST phase hooks, and are only interested in
1147
    # their results
1148
    if phase == constants.HOOKS_PHASE_POST:
1149
      # Used to change hooks' output to proper indentation
1150
      indent_re = re.compile('^', re.M)
1151
      feedback_fn("* Hooks Results")
1152
      if not hooks_results:
1153
        feedback_fn("  - ERROR: general communication failure")
1154
        lu_result = 1
1155
      else:
1156
        for node_name in hooks_results:
1157
          show_node_header = True
1158
          res = hooks_results[node_name]
1159
          if res.failed or res.data is False or not isinstance(res.data, list):
1160
            if res.offline:
1161
              # no need to warn or set fail return value
1162
              continue
1163
            feedback_fn("    Communication failure in hooks execution")
1164
            lu_result = 1
1165
            continue
1166
          for script, hkr, output in res.data:
1167
            if hkr == constants.HKR_FAIL:
1168
              # The node header is only shown once, if there are
1169
              # failing hooks on that node
1170
              if show_node_header:
1171
                feedback_fn("  Node %s:" % node_name)
1172
                show_node_header = False
1173
              feedback_fn("    ERROR: Script %s failed, output:" % script)
1174
              output = indent_re.sub('      ', output)
1175
              feedback_fn("%s" % output)
1176
              lu_result = 1
1177

    
1178
      return lu_result
1179

    
1180

    
1181
class LUVerifyDisks(NoHooksLU):
1182
  """Verifies the cluster disks status.
1183

1184
  """
1185
  _OP_REQP = []
1186
  REQ_BGL = False
1187

    
1188
  def ExpandNames(self):
1189
    self.needed_locks = {
1190
      locking.LEVEL_NODE: locking.ALL_SET,
1191
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1192
    }
1193
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
1194

    
1195
  def CheckPrereq(self):
1196
    """Check prerequisites.
1197

1198
    This has no prerequisites.
1199

1200
    """
1201
    pass
1202

    
1203
  def Exec(self, feedback_fn):
1204
    """Verify integrity of cluster disks.
1205

1206
    """
1207
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
1208

    
1209
    vg_name = self.cfg.GetVGName()
1210
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1211
    instances = [self.cfg.GetInstanceInfo(name)
1212
                 for name in self.cfg.GetInstanceList()]
1213

    
1214
    nv_dict = {}
1215
    for inst in instances:
1216
      inst_lvs = {}
1217
      if (not inst.admin_up or
1218
          inst.disk_template not in constants.DTS_NET_MIRROR):
1219
        continue
1220
      inst.MapLVsByNode(inst_lvs)
1221
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1222
      for node, vol_list in inst_lvs.iteritems():
1223
        for vol in vol_list:
1224
          nv_dict[(node, vol)] = inst
1225

    
1226
    if not nv_dict:
1227
      return result
1228

    
1229
    node_lvs = self.rpc.call_volume_list(nodes, vg_name)
1230

    
1231
    to_act = set()
1232
    for node in nodes:
1233
      # node_volume
1234
      lvs = node_lvs[node]
1235
      if lvs.failed:
1236
        if not lvs.offline:
1237
          self.LogWarning("Connection to node %s failed: %s" %
1238
                          (node, lvs.data))
1239
        continue
1240
      lvs = lvs.data
1241
      if isinstance(lvs, basestring):
1242
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
1243
        res_nlvm[node] = lvs
1244
      elif not isinstance(lvs, dict):
1245
        logging.warning("Connection to node %s failed or invalid data"
1246
                        " returned", node)
1247
        res_nodes.append(node)
1248
        continue
1249

    
1250
      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
1251
        inst = nv_dict.pop((node, lv_name), None)
1252
        if (not lv_online and inst is not None
1253
            and inst.name not in res_instances):
1254
          res_instances.append(inst.name)
1255

    
1256
    # any leftover items in nv_dict are missing LVs, let's arrange the
1257
    # data better
1258
    for key, inst in nv_dict.iteritems():
1259
      if inst.name not in res_missing:
1260
        res_missing[inst.name] = []
1261
      res_missing[inst.name].append(key)
1262

    
1263
    return result
1264

    
1265

    
1266
class LURenameCluster(LogicalUnit):
1267
  """Rename the cluster.
1268

1269
  """
1270
  HPATH = "cluster-rename"
1271
  HTYPE = constants.HTYPE_CLUSTER
1272
  _OP_REQP = ["name"]
1273

    
1274
  def BuildHooksEnv(self):
1275
    """Build hooks env.
1276

1277
    """
1278
    env = {
1279
      "OP_TARGET": self.cfg.GetClusterName(),
1280
      "NEW_NAME": self.op.name,
1281
      }
1282
    mn = self.cfg.GetMasterNode()
1283
    return env, [mn], [mn]
1284

    
1285
  def CheckPrereq(self):
1286
    """Verify that the passed name is a valid one.
1287

1288
    """
1289
    hostname = utils.HostInfo(self.op.name)
1290

    
1291
    new_name = hostname.name
1292
    self.ip = new_ip = hostname.ip
1293
    old_name = self.cfg.GetClusterName()
1294
    old_ip = self.cfg.GetMasterIP()
1295
    if new_name == old_name and new_ip == old_ip:
1296
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1297
                                 " cluster has changed")
1298
    if new_ip != old_ip:
1299
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1300
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1301
                                   " reachable on the network. Aborting." %
1302
                                   new_ip)
1303

    
1304
    self.op.name = new_name
1305

    
1306
  def Exec(self, feedback_fn):
1307
    """Rename the cluster.
1308

1309
    """
1310
    clustername = self.op.name
1311
    ip = self.ip
1312

    
1313
    # shutdown the master IP
1314
    master = self.cfg.GetMasterNode()
1315
    result = self.rpc.call_node_stop_master(master, False)
1316
    if result.failed or not result.data:
1317
      raise errors.OpExecError("Could not disable the master role")
1318

    
1319
    try:
1320
      cluster = self.cfg.GetClusterInfo()
1321
      cluster.cluster_name = clustername
1322
      cluster.master_ip = ip
1323
      self.cfg.Update(cluster)
1324

    
1325
      # update the known hosts file
1326
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
1327
      node_list = self.cfg.GetNodeList()
1328
      try:
1329
        node_list.remove(master)
1330
      except ValueError:
1331
        pass
1332
      result = self.rpc.call_upload_file(node_list,
1333
                                         constants.SSH_KNOWN_HOSTS_FILE)
1334
      for to_node, to_result in result.iteritems():
1335
        if to_result.failed or not to_result.data:
1336
          logging.error("Copy of file %s to node %s failed",
1337
                        constants.SSH_KNOWN_HOSTS_FILE, to_node)
1338

    
1339
    finally:
1340
      result = self.rpc.call_node_start_master(master, False)
1341
      if result.failed or not result.data:
1342
        self.LogWarning("Could not re-enable the master role on"
1343
                        " the master, please restart manually.")
1344

    
1345

    
1346
def _RecursiveCheckIfLVMBased(disk):
1347
  """Check if the given disk or its children are lvm-based.
1348

1349
  @type disk: L{objects.Disk}
1350
  @param disk: the disk to check
1351
  @rtype: boolean
1352
  @return: boolean indicating whether a LD_LV dev_type was found or not
1353

1354
  """
1355
  if disk.children:
1356
    for chdisk in disk.children:
1357
      if _RecursiveCheckIfLVMBased(chdisk):
1358
        return True
1359
  return disk.dev_type == constants.LD_LV
1360

    
1361
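# Illustrative example (hypothetical disk layout): a DRBD-based disk whose
# children are two logical volumes (dev_type constants.LD_LV) makes
# _RecursiveCheckIfLVMBased return True, since the recursion finds an LD_LV
# child; a file-based disk with no children would return False.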

    
1362
class LUSetClusterParams(LogicalUnit):
1363
  """Change the parameters of the cluster.
1364

1365
  """
1366
  HPATH = "cluster-modify"
1367
  HTYPE = constants.HTYPE_CLUSTER
1368
  _OP_REQP = []
1369
  REQ_BGL = False
1370

    
1371
  def CheckParameters(self):
1372
    """Check parameters
1373

1374
    """
1375
    if not hasattr(self.op, "candidate_pool_size"):
1376
      self.op.candidate_pool_size = None
1377
    if self.op.candidate_pool_size is not None:
1378
      try:
1379
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
1380
      except ValueError, err:
1381
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
1382
                                   str(err))
1383
      if self.op.candidate_pool_size < 1:
1384
        raise errors.OpPrereqError("At least one master candidate needed")
1385

    
1386
  def ExpandNames(self):
1387
    # FIXME: in the future maybe other cluster params won't require checking on
1388
    # all nodes to be modified.
1389
    self.needed_locks = {
1390
      locking.LEVEL_NODE: locking.ALL_SET,
1391
    }
1392
    self.share_locks[locking.LEVEL_NODE] = 1
1393

    
1394
  def BuildHooksEnv(self):
1395
    """Build hooks env.
1396

1397
    """
1398
    env = {
1399
      "OP_TARGET": self.cfg.GetClusterName(),
1400
      "NEW_VG_NAME": self.op.vg_name,
1401
      }
1402
    mn = self.cfg.GetMasterNode()
1403
    return env, [mn], [mn]
1404

    
1405
  def CheckPrereq(self):
1406
    """Check prerequisites.
1407

1408
    This checks whether the given params don't conflict and
1409
    if the given volume group is valid.
1410

1411
    """
1412
    if self.op.vg_name is not None and not self.op.vg_name:
1413
      instances = self.cfg.GetAllInstancesInfo().values()
1414
      for inst in instances:
1415
        for disk in inst.disks:
1416
          if _RecursiveCheckIfLVMBased(disk):
1417
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1418
                                       " lvm-based instances exist")
1419

    
1420
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1421

    
1422
    # if vg_name not None, checks given volume group on all nodes
1423
    if self.op.vg_name:
1424
      vglist = self.rpc.call_vg_list(node_list)
1425
      for node in node_list:
1426
        if vglist[node].failed:
1427
          # ignoring down node
1428
          self.LogWarning("Node %s unreachable/error, ignoring" % node)
1429
          continue
1430
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
1431
                                              self.op.vg_name,
1432
                                              constants.MIN_VG_SIZE)
1433
        if vgstatus:
1434
          raise errors.OpPrereqError("Error on node '%s': %s" %
1435
                                     (node, vgstatus))
1436

    
1437
    self.cluster = cluster = self.cfg.GetClusterInfo()
1438
    # validate beparams changes
1439
    if self.op.beparams:
1440
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
1441
      self.new_beparams = cluster.FillDict(
1442
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)
1443

    
1444
    # hypervisor list/parameters
1445
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
1446
    if self.op.hvparams:
1447
      if not isinstance(self.op.hvparams, dict):
1448
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
1449
      for hv_name, hv_dict in self.op.hvparams.items():
1450
        if hv_name not in self.new_hvparams:
1451
          self.new_hvparams[hv_name] = hv_dict
1452
        else:
1453
          self.new_hvparams[hv_name].update(hv_dict)
1454

    
1455
    if self.op.enabled_hypervisors is not None:
1456
      self.hv_list = self.op.enabled_hypervisors
1457
    else:
1458
      self.hv_list = cluster.enabled_hypervisors
1459

    
1460
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
1461
      # either the enabled list has changed, or the parameters have, validate
1462
      for hv_name, hv_params in self.new_hvparams.items():
1463
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
1464
            (self.op.enabled_hypervisors and
1465
             hv_name in self.op.enabled_hypervisors)):
1466
          # either this is a new hypervisor, or its parameters have changed
1467
          hv_class = hypervisor.GetHypervisor(hv_name)
1468
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1469
          hv_class.CheckParameterSyntax(hv_params)
1470
          _CheckHVParams(self, node_list, hv_name, hv_params)
1471

    
1472
  def Exec(self, feedback_fn):
1473
    """Change the parameters of the cluster.
1474

1475
    """
1476
    if self.op.vg_name is not None:
1477
      if self.op.vg_name != self.cfg.GetVGName():
1478
        self.cfg.SetVGName(self.op.vg_name)
1479
      else:
1480
        feedback_fn("Cluster LVM configuration already in desired"
1481
                    " state, not changing")
1482
    if self.op.hvparams:
1483
      self.cluster.hvparams = self.new_hvparams
1484
    if self.op.enabled_hypervisors is not None:
1485
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1486
    if self.op.beparams:
1487
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
1488
    if self.op.candidate_pool_size is not None:
1489
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
1490

    
1491
    self.cfg.Update(self.cluster)
1492

    
1493
    # we want to update nodes after the cluster so that if any errors
1494
    # happen, we have recorded and saved the cluster info
1495
    if self.op.candidate_pool_size is not None:
1496
      _AdjustCandidatePool(self)
1497

    
1498

    
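
# Editor's note: the helper below is an illustrative sketch, not part of
# the original module and not called by any LU. Using plain dicts, it
# mirrors how LUSetClusterParams.CheckPrereq above merges the per-hypervisor
# overrides given in the opcode into the existing cluster hvparams; the
# parameter names are hypothetical.
def _ExampleMergeHvParams(old_hvparams, op_hvparams):
  """Return a copy of old_hvparams updated with op_hvparams.

  Unknown hypervisors are added wholesale, known ones are updated key by
  key, matching the "new or updated hypervisor" handling above.

  """
  new_hvparams = dict((hv_name, hv_dict.copy())
                      for hv_name, hv_dict in old_hvparams.items())
  for hv_name, hv_dict in op_hvparams.items():
    if hv_name not in new_hvparams:
      new_hvparams[hv_name] = dict(hv_dict)
    else:
      new_hvparams[hv_name].update(hv_dict)
  return new_hvparams
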
1499
class LURedistributeConfig(NoHooksLU):
1500
  """Force the redistribution of cluster configuration.
1501

1502
  This is a very simple LU.
1503

1504
  """
1505
  _OP_REQP = []
1506
  REQ_BGL = False
1507

    
1508
  def ExpandNames(self):
1509
    self.needed_locks = {
1510
      locking.LEVEL_NODE: locking.ALL_SET,
1511
    }
1512
    self.share_locks[locking.LEVEL_NODE] = 1
1513

    
1514
  def CheckPrereq(self):
1515
    """Check prerequisites.
1516

1517
    """
1518

    
1519
  def Exec(self, feedback_fn):
1520
    """Redistribute the configuration.
1521

1522
    """
1523
    self.cfg.Update(self.cfg.GetClusterInfo())
1524

    
1525

    
1526
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
1527
  """Sleep and poll for an instance's disk to sync.
1528

1529
  """
1530
  if not instance.disks:
1531
    return True
1532

    
1533
  if not oneshot:
1534
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1535

    
1536
  node = instance.primary_node
1537

    
1538
  for dev in instance.disks:
1539
    lu.cfg.SetDiskID(dev, node)
1540

    
1541
  retries = 0
1542
  while True:
1543
    max_time = 0
1544
    done = True
1545
    cumul_degraded = False
1546
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1547
    if rstats.failed or not rstats.data:
1548
      lu.LogWarning("Can't get any data from node %s", node)
1549
      retries += 1
1550
      if retries >= 10:
1551
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1552
                                 " aborting." % node)
1553
      time.sleep(6)
1554
      continue
1555
    rstats = rstats.data
1556
    retries = 0
1557
    for i, mstat in enumerate(rstats):
1558
      if mstat is None:
1559
        lu.LogWarning("Can't compute data for node %s/%s",
1560
                           node, instance.disks[i].iv_name)
1561
        continue
1562
      # we ignore the ldisk parameter
1563
      perc_done, est_time, is_degraded, _ = mstat
1564
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1565
      if perc_done is not None:
1566
        done = False
1567
        if est_time is not None:
1568
          rem_time = "%d estimated seconds remaining" % est_time
1569
          max_time = est_time
1570
        else:
1571
          rem_time = "no time estimate"
1572
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
1573
                        (instance.disks[i].iv_name, perc_done, rem_time))
1574
    if done or oneshot:
1575
      break
1576

    
1577
    time.sleep(min(60, max_time))
1578

    
1579
  if done:
1580
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1581
  return not cumul_degraded
1582

    
1583

    
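
# Editor's note: illustrative sketch only, unused by the module. It shows
# in isolation how the per-device status tuples returned by
# call_blockdev_getmirrorstatus are folded into the "done"/"degraded"
# summary that _WaitForSync above computes; the tuple layout
# (perc_done, est_time, is_degraded, ldisk) follows the code above and
# the argument is hypothetical.
def _ExampleSummarizeMirrorStatus(mirror_stats):
  """Return (all_done, any_degraded) for a list of mirror status tuples."""
  all_done = True
  any_degraded = False
  for mstat in mirror_stats:
    if mstat is None:
      # no data for this device, same as the warning branch above
      continue
    perc_done, _, is_degraded, _ = mstat
    if perc_done is not None:
      # a device still reporting a percentage is still syncing
      all_done = False
    any_degraded = any_degraded or (is_degraded and perc_done is None)
  return all_done, any_degraded
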
1584
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1585
  """Check that mirrors are not degraded.
1586

1587
  The ldisk parameter, if True, will change the test from the
1588
  is_degraded attribute (which represents overall non-ok status for
1589
  the device(s)) to the ldisk (representing the local storage status).
1590

1591
  """
1592
  lu.cfg.SetDiskID(dev, node)
1593
  if ldisk:
1594
    idx = 6
1595
  else:
1596
    idx = 5
1597

    
1598
  result = True
1599
  if on_primary or dev.AssembleOnSecondary():
1600
    rstats = lu.rpc.call_blockdev_find(node, dev)
1601
    msg = rstats.RemoteFailMsg()
1602
    if msg:
1603
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
1604
      result = False
1605
    elif not rstats.payload:
1606
      lu.LogWarning("Can't find disk on node %s", node)
1607
      result = False
1608
    else:
1609
      result = result and (not rstats.payload[idx])
1610
  if dev.children:
1611
    for child in dev.children:
1612
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1613

    
1614
  return result
1615

    
1616

    
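
# Editor's note: illustrative sketch, unused. A typical caller combines
# _CheckDiskConsistency over every disk of an instance on all of its
# nodes; "lu" and "instance" are assumed to be the usual LogicalUnit and
# objects.Instance values handled throughout this module.
def _ExampleAllDisksConsistent(lu, instance):
  """Return True if no disk of the instance is degraded on any node."""
  result = True
  for node in instance.all_nodes:
    on_primary = (node == instance.primary_node)
    for dev in instance.disks:
      result = result and _CheckDiskConsistency(lu, dev, node, on_primary)
  return result
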
1617
class LUDiagnoseOS(NoHooksLU):
1618
  """Logical unit for OS diagnose/query.
1619

1620
  """
1621
  _OP_REQP = ["output_fields", "names"]
1622
  REQ_BGL = False
1623
  _FIELDS_STATIC = utils.FieldSet()
1624
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")
1625

    
1626
  def ExpandNames(self):
1627
    if self.op.names:
1628
      raise errors.OpPrereqError("Selective OS query not supported")
1629

    
1630
    _CheckOutputFields(static=self._FIELDS_STATIC,
1631
                       dynamic=self._FIELDS_DYNAMIC,
1632
                       selected=self.op.output_fields)
1633

    
1634
    # Lock all nodes, in shared mode
1635
    self.needed_locks = {}
1636
    self.share_locks[locking.LEVEL_NODE] = 1
1637
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1638

    
1639
  def CheckPrereq(self):
1640
    """Check prerequisites.
1641

1642
    """
1643

    
1644
  @staticmethod
1645
  def _DiagnoseByOS(node_list, rlist):
1646
    """Remaps a per-node return list into an a per-os per-node dictionary
1647

1648
    @param node_list: a list with the names of all nodes
1649
    @param rlist: a map with node names as keys and OS objects as values
1650

1651
    @rtype: dict
1652
    @return: a dictionary with osnames as keys and as value another map, with
1653
        nodes as keys and list of OS objects as values, eg::
1654

1655
          {"debian-etch": {"node1": [<object>,...],
1656
                           "node2": [<object>,]}
1657
          }
1658

1659
    """
1660
    all_os = {}
1661
    for node_name, nr in rlist.iteritems():
1662
      if nr.failed or not nr.data:
1663
        continue
1664
      for os_obj in nr.data:
1665
        if os_obj.name not in all_os:
1666
          # build a list of nodes for this os containing empty lists
1667
          # for each node in node_list
1668
          all_os[os_obj.name] = {}
1669
          for nname in node_list:
1670
            all_os[os_obj.name][nname] = []
1671
        all_os[os_obj.name][node_name].append(os_obj)
1672
    return all_os
1673

    
1674
  def Exec(self, feedback_fn):
1675
    """Compute the list of OSes.
1676

1677
    """
1678
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1679
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()
1680
                   if node in node_list]
1681
    node_data = self.rpc.call_os_diagnose(valid_nodes)
1682
    if node_data == False:
1683
      raise errors.OpExecError("Can't gather the list of OSes")
1684
    pol = self._DiagnoseByOS(valid_nodes, node_data)
1685
    output = []
1686
    for os_name, os_data in pol.iteritems():
1687
      row = []
1688
      for field in self.op.output_fields:
1689
        if field == "name":
1690
          val = os_name
1691
        elif field == "valid":
1692
          val = utils.all([osl and osl[0] for osl in os_data.values()])
1693
        elif field == "node_status":
1694
          val = {}
1695
          for node_name, nos_list in os_data.iteritems():
1696
            val[node_name] = [(v.status, v.path) for v in nos_list]
1697
        else:
1698
          raise errors.ParameterError(field)
1699
        row.append(val)
1700
      output.append(row)
1701

    
1702
    return output
1703

    
1704

    
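
# Editor's note: illustrative sketch, unused. It isolates the computation
# of the "valid" output field above: an OS is considered valid only when
# every node reported at least one (valid) OS object for it. The argument
# is a hypothetical dict of node name to list of OS objects, i.e. one
# value of the map built by _DiagnoseByOS.
def _ExampleOsValid(per_node_os):
  """Return True if the OS is present and valid on all nodes."""
  return utils.all([osl and osl[0] for osl in per_node_os.values()])
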
1705
class LURemoveNode(LogicalUnit):
1706
  """Logical unit for removing a node.
1707

1708
  """
1709
  HPATH = "node-remove"
1710
  HTYPE = constants.HTYPE_NODE
1711
  _OP_REQP = ["node_name"]
1712

    
1713
  def BuildHooksEnv(self):
1714
    """Build hooks env.
1715

1716
    This doesn't run on the target node in the pre phase as a failed
1717
    node would then be impossible to remove.
1718

1719
    """
1720
    env = {
1721
      "OP_TARGET": self.op.node_name,
1722
      "NODE_NAME": self.op.node_name,
1723
      }
1724
    all_nodes = self.cfg.GetNodeList()
1725
    all_nodes.remove(self.op.node_name)
1726
    return env, all_nodes, all_nodes
1727

    
1728
  def CheckPrereq(self):
1729
    """Check prerequisites.
1730

1731
    This checks:
1732
     - the node exists in the configuration
1733
     - it does not have primary or secondary instances
1734
     - it's not the master
1735

1736
    Any errors are signalled by raising errors.OpPrereqError.
1737

1738
    """
1739
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1740
    if node is None:
1741
      raise errors.OpPrereqError, ("Node '%s' is unknown." % self.op.node_name)
1742

    
1743
    instance_list = self.cfg.GetInstanceList()
1744

    
1745
    masternode = self.cfg.GetMasterNode()
1746
    if node.name == masternode:
1747
      raise errors.OpPrereqError("Node is the master node,"
1748
                                 " you need to failover first.")
1749

    
1750
    for instance_name in instance_list:
1751
      instance = self.cfg.GetInstanceInfo(instance_name)
1752
      if node.name in instance.all_nodes:
1753
        raise errors.OpPrereqError("Instance %s is still running on the node,"
1754
                                   " please remove first." % instance_name)
1755
    self.op.node_name = node.name
1756
    self.node = node
1757

    
1758
  def Exec(self, feedback_fn):
1759
    """Removes the node from the cluster.
1760

1761
    """
1762
    node = self.node
1763
    logging.info("Stopping the node daemon and removing configs from node %s",
1764
                 node.name)
1765

    
1766
    self.context.RemoveNode(node.name)
1767

    
1768
    self.rpc.call_node_leave_cluster(node.name)
1769

    
1770
    # Promote nodes to master candidate as needed
1771
    _AdjustCandidatePool(self)
1772

    
1773

    
1774
class LUQueryNodes(NoHooksLU):
1775
  """Logical unit for querying nodes.
1776

1777
  """
1778
  _OP_REQP = ["output_fields", "names", "use_locking"]
1779
  REQ_BGL = False
1780
  _FIELDS_DYNAMIC = utils.FieldSet(
1781
    "dtotal", "dfree",
1782
    "mtotal", "mnode", "mfree",
1783
    "bootid",
1784
    "ctotal", "cnodes", "csockets",
1785
    )
1786

    
1787
  _FIELDS_STATIC = utils.FieldSet(
1788
    "name", "pinst_cnt", "sinst_cnt",
1789
    "pinst_list", "sinst_list",
1790
    "pip", "sip", "tags",
1791
    "serial_no",
1792
    "master_candidate",
1793
    "master",
1794
    "offline",
1795
    "drained",
1796
    )
1797

    
1798
  def ExpandNames(self):
1799
    _CheckOutputFields(static=self._FIELDS_STATIC,
1800
                       dynamic=self._FIELDS_DYNAMIC,
1801
                       selected=self.op.output_fields)
1802

    
1803
    self.needed_locks = {}
1804
    self.share_locks[locking.LEVEL_NODE] = 1
1805

    
1806
    if self.op.names:
1807
      self.wanted = _GetWantedNodes(self, self.op.names)
1808
    else:
1809
      self.wanted = locking.ALL_SET
1810

    
1811
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
1812
    self.do_locking = self.do_node_query and self.op.use_locking
1813
    if self.do_locking:
1814
      # if we don't request only static fields, we need to lock the nodes
1815
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
1816

    
1817

    
1818
  def CheckPrereq(self):
1819
    """Check prerequisites.
1820

1821
    """
1822
    # The validation of the node list is done in _GetWantedNodes,
1823
    # if non-empty, and if empty, there's no validation to do
1824
    pass
1825

    
1826
  def Exec(self, feedback_fn):
1827
    """Computes the list of nodes and their attributes.
1828

1829
    """
1830
    all_info = self.cfg.GetAllNodesInfo()
1831
    if self.do_locking:
1832
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
1833
    elif self.wanted != locking.ALL_SET:
1834
      nodenames = self.wanted
1835
      missing = set(nodenames).difference(all_info.keys())
1836
      if missing:
1837
        raise errors.OpExecError(
1838
          "Some nodes were removed before retrieving their data: %s" % missing)
1839
    else:
1840
      nodenames = all_info.keys()
1841

    
1842
    nodenames = utils.NiceSort(nodenames)
1843
    nodelist = [all_info[name] for name in nodenames]
1844

    
1845
    # begin data gathering
1846

    
1847
    if self.do_node_query:
1848
      live_data = {}
1849
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
1850
                                          self.cfg.GetHypervisorType())
1851
      for name in nodenames:
1852
        nodeinfo = node_data[name]
1853
        if not nodeinfo.failed and nodeinfo.data:
1854
          nodeinfo = nodeinfo.data
1855
          fn = utils.TryConvert
1856
          live_data[name] = {
1857
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
1858
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
1859
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
1860
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
1861
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
1862
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
1863
            "bootid": nodeinfo.get('bootid', None),
1864
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
1865
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
1866
            }
1867
        else:
1868
          live_data[name] = {}
1869
    else:
1870
      live_data = dict.fromkeys(nodenames, {})
1871

    
1872
    node_to_primary = dict([(name, set()) for name in nodenames])
1873
    node_to_secondary = dict([(name, set()) for name in nodenames])
1874

    
1875
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
1876
                             "sinst_cnt", "sinst_list"))
1877
    if inst_fields & frozenset(self.op.output_fields):
1878
      instancelist = self.cfg.GetInstanceList()
1879

    
1880
      for instance_name in instancelist:
1881
        inst = self.cfg.GetInstanceInfo(instance_name)
1882
        if inst.primary_node in node_to_primary:
1883
          node_to_primary[inst.primary_node].add(inst.name)
1884
        for secnode in inst.secondary_nodes:
1885
          if secnode in node_to_secondary:
1886
            node_to_secondary[secnode].add(inst.name)
1887

    
1888
    master_node = self.cfg.GetMasterNode()
1889

    
1890
    # end data gathering
1891

    
1892
    output = []
1893
    for node in nodelist:
1894
      node_output = []
1895
      for field in self.op.output_fields:
1896
        if field == "name":
1897
          val = node.name
1898
        elif field == "pinst_list":
1899
          val = list(node_to_primary[node.name])
1900
        elif field == "sinst_list":
1901
          val = list(node_to_secondary[node.name])
1902
        elif field == "pinst_cnt":
1903
          val = len(node_to_primary[node.name])
1904
        elif field == "sinst_cnt":
1905
          val = len(node_to_secondary[node.name])
1906
        elif field == "pip":
1907
          val = node.primary_ip
1908
        elif field == "sip":
1909
          val = node.secondary_ip
1910
        elif field == "tags":
1911
          val = list(node.GetTags())
1912
        elif field == "serial_no":
1913
          val = node.serial_no
1914
        elif field == "master_candidate":
1915
          val = node.master_candidate
1916
        elif field == "master":
1917
          val = node.name == master_node
1918
        elif field == "offline":
1919
          val = node.offline
1920
        elif field == "drained":
1921
          val = node.drained
1922
        elif self._FIELDS_DYNAMIC.Matches(field):
1923
          val = live_data[node.name].get(field, None)
1924
        else:
1925
          raise errors.ParameterError(field)
1926
        node_output.append(val)
1927
      output.append(node_output)
1928

    
1929
    return output
1930

    
1931

    
1932
class LUQueryNodeVolumes(NoHooksLU):
1933
  """Logical unit for getting volumes on node(s).
1934

1935
  """
1936
  _OP_REQP = ["nodes", "output_fields"]
1937
  REQ_BGL = False
1938
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
1939
  _FIELDS_STATIC = utils.FieldSet("node")
1940

    
1941
  def ExpandNames(self):
1942
    _CheckOutputFields(static=self._FIELDS_STATIC,
1943
                       dynamic=self._FIELDS_DYNAMIC,
1944
                       selected=self.op.output_fields)
1945

    
1946
    self.needed_locks = {}
1947
    self.share_locks[locking.LEVEL_NODE] = 1
1948
    if not self.op.nodes:
1949
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1950
    else:
1951
      self.needed_locks[locking.LEVEL_NODE] = \
1952
        _GetWantedNodes(self, self.op.nodes)
1953

    
1954
  def CheckPrereq(self):
1955
    """Check prerequisites.
1956

1957
    This checks that the fields required are valid output fields.
1958

1959
    """
1960
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
1961

    
1962
  def Exec(self, feedback_fn):
1963
    """Computes the list of nodes and their attributes.
1964

1965
    """
1966
    nodenames = self.nodes
1967
    volumes = self.rpc.call_node_volumes(nodenames)
1968

    
1969
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
1970
             in self.cfg.GetInstanceList()]
1971

    
1972
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
1973

    
1974
    output = []
1975
    for node in nodenames:
1976
      if node not in volumes or volumes[node].failed or not volumes[node].data:
1977
        continue
1978

    
1979
      node_vols = volumes[node].data[:]
1980
      node_vols.sort(key=lambda vol: vol['dev'])
1981

    
1982
      for vol in node_vols:
1983
        node_output = []
1984
        for field in self.op.output_fields:
1985
          if field == "node":
1986
            val = node
1987
          elif field == "phys":
1988
            val = vol['dev']
1989
          elif field == "vg":
1990
            val = vol['vg']
1991
          elif field == "name":
1992
            val = vol['name']
1993
          elif field == "size":
1994
            val = int(float(vol['size']))
1995
          elif field == "instance":
1996
            for inst in ilist:
1997
              if node not in lv_by_node[inst]:
1998
                continue
1999
              if vol['name'] in lv_by_node[inst][node]:
2000
                val = inst.name
2001
                break
2002
            else:
2003
              val = '-'
2004
          else:
2005
            raise errors.ParameterError(field)
2006
          node_output.append(str(val))
2007

    
2008
        output.append(node_output)
2009

    
2010
    return output
2011

    
2012

    
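
# Editor's note: illustrative sketch, unused. It isolates the lookup done
# for the "instance" output field above: given the per-instance result of
# MapLVsByNode(), find which instance owns a logical volume on a node.
# The parameter names are hypothetical.
def _ExampleFindLvOwner(lv_by_node, node, lv_name):
  """Return the owning instance's name, or '-' if the LV is unowned.

  "lv_by_node" maps instance objects to their MapLVsByNode() result.

  """
  for inst, node_map in lv_by_node.items():
    if node in node_map and lv_name in node_map[node]:
      return inst.name
  return "-"
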
2013
class LUAddNode(LogicalUnit):
2014
  """Logical unit for adding node to the cluster.
2015

2016
  """
2017
  HPATH = "node-add"
2018
  HTYPE = constants.HTYPE_NODE
2019
  _OP_REQP = ["node_name"]
2020

    
2021
  def BuildHooksEnv(self):
2022
    """Build hooks env.
2023

2024
    This will run on all nodes before, and on all nodes + the new node after.
2025

2026
    """
2027
    env = {
2028
      "OP_TARGET": self.op.node_name,
2029
      "NODE_NAME": self.op.node_name,
2030
      "NODE_PIP": self.op.primary_ip,
2031
      "NODE_SIP": self.op.secondary_ip,
2032
      }
2033
    nodes_0 = self.cfg.GetNodeList()
2034
    nodes_1 = nodes_0 + [self.op.node_name, ]
2035
    return env, nodes_0, nodes_1
2036

    
2037
  def CheckPrereq(self):
2038
    """Check prerequisites.
2039

2040
    This checks:
2041
     - the new node is not already in the config
2042
     - it is resolvable
2043
     - its parameters (single/dual homed) matches the cluster
2044

2045
    Any errors are signalled by raising errors.OpPrereqError.
2046

2047
    """
2048
    node_name = self.op.node_name
2049
    cfg = self.cfg
2050

    
2051
    dns_data = utils.HostInfo(node_name)
2052

    
2053
    node = dns_data.name
2054
    primary_ip = self.op.primary_ip = dns_data.ip
2055
    secondary_ip = getattr(self.op, "secondary_ip", None)
2056
    if secondary_ip is None:
2057
      secondary_ip = primary_ip
2058
    if not utils.IsValidIP(secondary_ip):
2059
      raise errors.OpPrereqError("Invalid secondary IP given")
2060
    self.op.secondary_ip = secondary_ip
2061

    
2062
    node_list = cfg.GetNodeList()
2063
    if not self.op.readd and node in node_list:
2064
      raise errors.OpPrereqError("Node %s is already in the configuration" %
2065
                                 node)
2066
    elif self.op.readd and node not in node_list:
2067
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
2068

    
2069
    for existing_node_name in node_list:
2070
      existing_node = cfg.GetNodeInfo(existing_node_name)
2071

    
2072
      if self.op.readd and node == existing_node_name:
2073
        if (existing_node.primary_ip != primary_ip or
2074
            existing_node.secondary_ip != secondary_ip):
2075
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
2076
                                     " address configuration as before")
2077
        continue
2078

    
2079
      if (existing_node.primary_ip == primary_ip or
2080
          existing_node.secondary_ip == primary_ip or
2081
          existing_node.primary_ip == secondary_ip or
2082
          existing_node.secondary_ip == secondary_ip):
2083
        raise errors.OpPrereqError("New node ip address(es) conflict with"
2084
                                   " existing node %s" % existing_node.name)
2085

    
2086
    # check that the type of the node (single versus dual homed) is the
2087
    # same as for the master
2088
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
2089
    master_singlehomed = myself.secondary_ip == myself.primary_ip
2090
    newbie_singlehomed = secondary_ip == primary_ip
2091
    if master_singlehomed != newbie_singlehomed:
2092
      if master_singlehomed:
2093
        raise errors.OpPrereqError("The master has no private ip but the"
2094
                                   " new node has one")
2095
      else:
2096
        raise errors.OpPrereqError("The master has a private ip but the"
2097
                                   " new node doesn't have one")
2098

    
2099
    # checks reachability
2100
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
2101
      raise errors.OpPrereqError("Node not reachable by ping")
2102

    
2103
    if not newbie_singlehomed:
2104
      # check reachability from my secondary ip to newbie's secondary ip
2105
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
2106
                           source=myself.secondary_ip):
2107
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
2108
                                   " based ping to noded port")
2109

    
2110
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2111
    mc_now, _ = self.cfg.GetMasterCandidateStats()
2112
    master_candidate = mc_now < cp_size
2113

    
2114
    self.new_node = objects.Node(name=node,
2115
                                 primary_ip=primary_ip,
2116
                                 secondary_ip=secondary_ip,
2117
                                 master_candidate=master_candidate,
2118
                                 offline=False, drained=False)
2119

    
2120
  def Exec(self, feedback_fn):
2121
    """Adds the new node to the cluster.
2122

2123
    """
2124
    new_node = self.new_node
2125
    node = new_node.name
2126

    
2127
    # check connectivity
2128
    result = self.rpc.call_version([node])[node]
2129
    result.Raise()
2130
    if result.data:
2131
      if constants.PROTOCOL_VERSION == result.data:
2132
        logging.info("Communication to node %s fine, sw version %s match",
2133
                     node, result.data)
2134
      else:
2135
        raise errors.OpExecError("Version mismatch master version %s,"
2136
                                 " node version %s" %
2137
                                 (constants.PROTOCOL_VERSION, result.data))
2138
    else:
2139
      raise errors.OpExecError("Cannot get version from the new node")
2140

    
2141
    # setup ssh on node
2142
    logging.info("Copy ssh key to node %s", node)
2143
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
2144
    keyarray = []
2145
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
2146
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
2147
                priv_key, pub_key]
2148

    
2149
    for i in keyfiles:
2150
      f = open(i, 'r')
2151
      try:
2152
        keyarray.append(f.read())
2153
      finally:
2154
        f.close()
2155

    
2156
    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
2157
                                    keyarray[2],
2158
                                    keyarray[3], keyarray[4], keyarray[5])
2159

    
2160
    msg = result.RemoteFailMsg()
2161
    if msg:
2162
      raise errors.OpExecError("Cannot transfer ssh keys to the"
2163
                               " new node: %s" % msg)
2164

    
2165
    # Add node to our /etc/hosts, and add key to known_hosts
2166
    utils.AddHostToEtcHosts(new_node.name)
2167

    
2168
    if new_node.secondary_ip != new_node.primary_ip:
2169
      result = self.rpc.call_node_has_ip_address(new_node.name,
2170
                                                 new_node.secondary_ip)
2171
      if result.failed or not result.data:
2172
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
2173
                                 " you gave (%s). Please fix and re-run this"
2174
                                 " command." % new_node.secondary_ip)
2175

    
2176
    node_verify_list = [self.cfg.GetMasterNode()]
2177
    node_verify_param = {
2178
      'nodelist': [node],
2179
      # TODO: do a node-net-test as well?
2180
    }
2181

    
2182
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
2183
                                       self.cfg.GetClusterName())
2184
    for verifier in node_verify_list:
2185
      if result[verifier].failed or not result[verifier].data:
2186
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
2187
                                 " for remote verification" % verifier)
2188
      if result[verifier].data['nodelist']:
2189
        for failed in result[verifier].data['nodelist']:
2190
          feedback_fn("ssh/hostname verification failed %s -> %s" %
2191
                      (verifier, result[verifier].data['nodelist'][failed]))
2192
        raise errors.OpExecError("ssh/hostname verification failed.")
2193

    
2194
    # Distribute updated /etc/hosts and known_hosts to all nodes,
2195
    # including the node just added
2196
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
2197
    dist_nodes = self.cfg.GetNodeList()
2198
    if not self.op.readd:
2199
      dist_nodes.append(node)
2200
    if myself.name in dist_nodes:
2201
      dist_nodes.remove(myself.name)
2202

    
2203
    logging.debug("Copying hosts and known_hosts to all nodes")
2204
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
2205
      result = self.rpc.call_upload_file(dist_nodes, fname)
2206
      for to_node, to_result in result.iteritems():
2207
        if to_result.failed or not to_result.data:
2208
          logging.error("Copy of file %s to node %s failed", fname, to_node)
2209

    
2210
    to_copy = []
2211
    enabled_hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
2212
    if constants.HTS_USE_VNC.intersection(enabled_hypervisors):
2213
      to_copy.append(constants.VNC_PASSWORD_FILE)
2214

    
2215
    for fname in to_copy:
2216
      result = self.rpc.call_upload_file([node], fname)
2217
      if result[node].failed or not result[node].data:
2218
        logging.error("Could not copy file %s to node %s", fname, node)
2219

    
2220
    if self.op.readd:
2221
      self.context.ReaddNode(new_node)
2222
    else:
2223
      self.context.AddNode(new_node)
2224

    
2225

    
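
# Editor's note: illustrative sketch, unused. LUAddNode.CheckPrereq above
# requires the new node to match the master's "homing": a node counts as
# single-homed when its secondary IP equals its primary IP, and mixing
# single- and dual-homed nodes is rejected. The parameters below are
# hypothetical IP strings.
def _ExampleHomingMatches(master_pip, master_sip, new_pip, new_sip):
  """Return True if the new node's homing matches the master's."""
  master_singlehomed = master_sip == master_pip
  newbie_singlehomed = new_sip == new_pip
  return master_singlehomed == newbie_singlehomed
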
2226
class LUSetNodeParams(LogicalUnit):
2227
  """Modifies the parameters of a node.
2228

2229
  """
2230
  HPATH = "node-modify"
2231
  HTYPE = constants.HTYPE_NODE
2232
  _OP_REQP = ["node_name"]
2233
  REQ_BGL = False
2234

    
2235
  def CheckArguments(self):
2236
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
2237
    if node_name is None:
2238
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2239
    self.op.node_name = node_name
2240
    _CheckBooleanOpField(self.op, 'master_candidate')
2241
    _CheckBooleanOpField(self.op, 'offline')
2242
    _CheckBooleanOpField(self.op, 'drained')
2243
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
2244
    if all_mods.count(None) == 3:
2245
      raise errors.OpPrereqError("Please pass at least one modification")
2246
    if all_mods.count(True) > 1:
2247
      raise errors.OpPrereqError("Can't set the node into more than one"
2248
                                 " state at the same time")
2249

    
2250
  def ExpandNames(self):
2251
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
2252

    
2253
  def BuildHooksEnv(self):
2254
    """Build hooks env.
2255

2256
    This runs on the master node.
2257

2258
    """
2259
    env = {
2260
      "OP_TARGET": self.op.node_name,
2261
      "MASTER_CANDIDATE": str(self.op.master_candidate),
2262
      "OFFLINE": str(self.op.offline),
2263
      "DRAINED": str(self.op.drained),
2264
      }
2265
    nl = [self.cfg.GetMasterNode(),
2266
          self.op.node_name]
2267
    return env, nl, nl
2268

    
2269
  def CheckPrereq(self):
2270
    """Check prerequisites.
2271

2272
    This only checks the instance list against the existing names.
2273

2274
    """
2275
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
2276

    
2277
    if ((self.op.master_candidate == False or self.op.offline == True or
2278
         self.op.drained == True) and node.master_candidate):
2279
      # we will demote the node from master_candidate
2280
      if self.op.node_name == self.cfg.GetMasterNode():
2281
        raise errors.OpPrereqError("The master node has to be a"
2282
                                   " master candidate, online and not drained")
2283
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2284
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
2285
      if num_candidates <= cp_size:
2286
        msg = ("Not enough master candidates (desired"
2287
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
2288
        if self.op.force:
2289
          self.LogWarning(msg)
2290
        else:
2291
          raise errors.OpPrereqError(msg)
2292

    
2293
    if (self.op.master_candidate == True and
2294
        ((node.offline and not self.op.offline == False) or
2295
         (node.drained and not self.op.drained == False))):
2296
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
2297
                                 " to master_candidate")
2298

    
2299
    return
2300

    
2301
  def Exec(self, feedback_fn):
2302
    """Modifies a node.
2303

2304
    """
2305
    node = self.node
2306

    
2307
    result = []
2308
    changed_mc = False
2309

    
2310
    if self.op.offline is not None:
2311
      node.offline = self.op.offline
2312
      result.append(("offline", str(self.op.offline)))
2313
      if self.op.offline == True:
2314
        if node.master_candidate:
2315
          node.master_candidate = False
2316
          changed_mc = True
2317
          result.append(("master_candidate", "auto-demotion due to offline"))
2318
        if node.drained:
2319
          node.drained = False
2320
          result.append(("drained", "clear drained status due to offline"))
2321

    
2322
    if self.op.master_candidate is not None:
2323
      node.master_candidate = self.op.master_candidate
2324
      changed_mc = True
2325
      result.append(("master_candidate", str(self.op.master_candidate)))
2326
      if self.op.master_candidate == False:
2327
        rrc = self.rpc.call_node_demote_from_mc(node.name)
2328
        msg = rrc.RemoteFailMsg()
2329
        if msg:
2330
          self.LogWarning("Node failed to demote itself: %s" % msg)
2331

    
2332
    if self.op.drained is not None:
2333
      node.drained = self.op.drained
2334
      if self.op.drained == True:
2335
        if node.master_candidate:
2336
          node.master_candidate = False
2337
          changed_mc = True
2338
          result.append(("master_candidate", "auto-demotion due to drain"))
2339
        if node.offline:
2340
          node.offline = False
2341
          result.append(("offline", "clear offline status due to drain"))
2342

    
2343
    # this will trigger configuration file update, if needed
2344
    self.cfg.Update(node)
2345
    # this will trigger job queue propagation or cleanup
2346
    if changed_mc:
2347
      self.context.ReaddNode(node)
2348

    
2349
    return result
2350

    
2351

    
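
# Editor's note: illustrative sketch, unused. It restates, on plain values,
# the argument rules enforced by LUSetNodeParams.CheckArguments above: each
# flag is True, False or None (None meaning "leave unchanged"), at least
# one must be given and at most one may be switched on at a time.
def _ExampleValidateNodeFlags(offline, master_candidate, drained):
  """Raise OpPrereqError if the flag combination is not acceptable."""
  all_mods = [offline, master_candidate, drained]
  if all_mods.count(None) == 3:
    raise errors.OpPrereqError("Please pass at least one modification")
  if all_mods.count(True) > 1:
    raise errors.OpPrereqError("Can't set the node into more than one"
                               " state at the same time")
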
2352
class LUQueryClusterInfo(NoHooksLU):
2353
  """Query cluster configuration.
2354

2355
  """
2356
  _OP_REQP = []
2357
  REQ_BGL = False
2358

    
2359
  def ExpandNames(self):
2360
    self.needed_locks = {}
2361

    
2362
  def CheckPrereq(self):
2363
    """No prerequsites needed for this LU.
2364

2365
    """
2366
    pass
2367

    
2368
  def Exec(self, feedback_fn):
2369
    """Return cluster config.
2370

2371
    """
2372
    cluster = self.cfg.GetClusterInfo()
2373
    result = {
2374
      "software_version": constants.RELEASE_VERSION,
2375
      "protocol_version": constants.PROTOCOL_VERSION,
2376
      "config_version": constants.CONFIG_VERSION,
2377
      "os_api_version": constants.OS_API_VERSION,
2378
      "export_version": constants.EXPORT_VERSION,
2379
      "architecture": (platform.architecture()[0], platform.machine()),
2380
      "name": cluster.cluster_name,
2381
      "master": cluster.master_node,
2382
      "default_hypervisor": cluster.default_hypervisor,
2383
      "enabled_hypervisors": cluster.enabled_hypervisors,
2384
      "hvparams": dict([(hypervisor, cluster.hvparams[hypervisor])
2385
                        for hypervisor in cluster.enabled_hypervisors]),
2386
      "beparams": cluster.beparams,
2387
      "candidate_pool_size": cluster.candidate_pool_size,
2388
      }
2389

    
2390
    return result
2391

    
2392

    
2393
class LUQueryConfigValues(NoHooksLU):
2394
  """Return configuration values.
2395

2396
  """
2397
  _OP_REQP = []
2398
  REQ_BGL = False
2399
  _FIELDS_DYNAMIC = utils.FieldSet()
2400
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")
2401

    
2402
  def ExpandNames(self):
2403
    self.needed_locks = {}
2404

    
2405
    _CheckOutputFields(static=self._FIELDS_STATIC,
2406
                       dynamic=self._FIELDS_DYNAMIC,
2407
                       selected=self.op.output_fields)
2408

    
2409
  def CheckPrereq(self):
2410
    """No prerequisites.
2411

2412
    """
2413
    pass
2414

    
2415
  def Exec(self, feedback_fn):
2416
    """Dump a representation of the cluster config to the standard output.
2417

2418
    """
2419
    values = []
2420
    for field in self.op.output_fields:
2421
      if field == "cluster_name":
2422
        entry = self.cfg.GetClusterName()
2423
      elif field == "master_node":
2424
        entry = self.cfg.GetMasterNode()
2425
      elif field == "drain_flag":
2426
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
2427
      else:
2428
        raise errors.ParameterError(field)
2429
      values.append(entry)
2430
    return values
2431

    
2432

    
2433
class LUActivateInstanceDisks(NoHooksLU):
2434
  """Bring up an instance's disks.
2435

2436
  """
2437
  _OP_REQP = ["instance_name"]
2438
  REQ_BGL = False
2439

    
2440
  def ExpandNames(self):
2441
    self._ExpandAndLockInstance()
2442
    self.needed_locks[locking.LEVEL_NODE] = []
2443
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2444

    
2445
  def DeclareLocks(self, level):
2446
    if level == locking.LEVEL_NODE:
2447
      self._LockInstancesNodes()
2448

    
2449
  def CheckPrereq(self):
2450
    """Check prerequisites.
2451

2452
    This checks that the instance is in the cluster.
2453

2454
    """
2455
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2456
    assert self.instance is not None, \
2457
      "Cannot retrieve locked instance %s" % self.op.instance_name
2458
    _CheckNodeOnline(self, self.instance.primary_node)
2459

    
2460
  def Exec(self, feedback_fn):
2461
    """Activate the disks.
2462

2463
    """
2464
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
2465
    if not disks_ok:
2466
      raise errors.OpExecError("Cannot activate block devices")
2467

    
2468
    return disks_info
2469

    
2470

    
2471
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
2472
  """Prepare the block devices for an instance.
2473

2474
  This sets up the block devices on all nodes.
2475

2476
  @type lu: L{LogicalUnit}
2477
  @param lu: the logical unit on whose behalf we execute
2478
  @type instance: L{objects.Instance}
2479
  @param instance: the instance for whose disks we assemble
2480
  @type ignore_secondaries: boolean
2481
  @param ignore_secondaries: if true, errors on secondary nodes
2482
      won't result in an error return from the function
2483
  @return: False if the operation failed, otherwise a list of
2484
      (host, instance_visible_name, node_visible_name)
2485
      with the mapping from node devices to instance devices
2486

2487
  """
2488
  device_info = []
2489
  disks_ok = True
2490
  iname = instance.name
2491
  # With the two-pass mechanism we try to reduce the window of
2492
  # opportunity for the race condition of switching DRBD to primary
2493
  # before handshaking occurred, but we do not eliminate it
2494

    
2495
  # The proper fix would be to wait (with some limits) until the
2496
  # connection has been made and drbd transitions from WFConnection
2497
  # into any other network-connected state (Connected, SyncTarget,
2498
  # SyncSource, etc.)
2499

    
2500
  # 1st pass, assemble on all nodes in secondary mode
2501
  for inst_disk in instance.disks:
2502
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2503
      lu.cfg.SetDiskID(node_disk, node)
2504
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
2505
      msg = result.RemoteFailMsg()
2506
      if msg:
2507
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2508
                           " (is_primary=False, pass=1): %s",
2509
                           inst_disk.iv_name, node, msg)
2510
        if not ignore_secondaries:
2511
          disks_ok = False
2512

    
2513
  # FIXME: race condition on drbd migration to primary
2514

    
2515
  # 2nd pass, do only the primary node
2516
  for inst_disk in instance.disks:
2517
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2518
      if node != instance.primary_node:
2519
        continue
2520
      lu.cfg.SetDiskID(node_disk, node)
2521
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
2522
      msg = result.RemoteFailMsg()
2523
      if msg:
2524
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2525
                           " (is_primary=True, pass=2): %s",
2526
                           inst_disk.iv_name, node, msg)
2527
        disks_ok = False
2528
    device_info.append((instance.primary_node, inst_disk.iv_name, result.data))
2529

    
2530
  # leave the disks configured for the primary node
2531
  # this is a workaround that would be fixed better by
2532
  # improving the logical/physical id handling
2533
  for disk in instance.disks:
2534
    lu.cfg.SetDiskID(disk, instance.primary_node)
2535

    
2536
  return disks_ok, device_info
2537

    
2538

    
2539
def _StartInstanceDisks(lu, instance, force):
2540
  """Start the disks of an instance.
2541

2542
  """
2543
  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
2544
                                           ignore_secondaries=force)
2545
  if not disks_ok:
2546
    _ShutdownInstanceDisks(lu, instance)
2547
    if force is not None and not force:
2548
      lu.proc.LogWarning("", hint="If the message above refers to a"
2549
                         " secondary node,"
2550
                         " you can retry the operation using '--force'.")
2551
    raise errors.OpExecError("Disk consistency error")
2552

    
2553

    
2554
class LUDeactivateInstanceDisks(NoHooksLU):
2555
  """Shutdown an instance's disks.
2556

2557
  """
2558
  _OP_REQP = ["instance_name"]
2559
  REQ_BGL = False
2560

    
2561
  def ExpandNames(self):
2562
    self._ExpandAndLockInstance()
2563
    self.needed_locks[locking.LEVEL_NODE] = []
2564
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2565

    
2566
  def DeclareLocks(self, level):
2567
    if level == locking.LEVEL_NODE:
2568
      self._LockInstancesNodes()
2569

    
2570
  def CheckPrereq(self):
2571
    """Check prerequisites.
2572

2573
    This checks that the instance is in the cluster.
2574

2575
    """
2576
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2577
    assert self.instance is not None, \
2578
      "Cannot retrieve locked instance %s" % self.op.instance_name
2579

    
2580
  def Exec(self, feedback_fn):
2581
    """Deactivate the disks
2582

2583
    """
2584
    instance = self.instance
2585
    _SafeShutdownInstanceDisks(self, instance)
2586

    
2587

    
2588
def _SafeShutdownInstanceDisks(lu, instance):
2589
  """Shutdown block devices of an instance.
2590

2591
  This function checks if an instance is running before calling
2592
  _ShutdownInstanceDisks.
2593

2594
  """
2595
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
2596
                                      [instance.hypervisor])
2597
  ins_l = ins_l[instance.primary_node]
2598
  if ins_l.failed or not isinstance(ins_l.data, list):
2599
    raise errors.OpExecError("Can't contact node '%s'" %
2600
                             instance.primary_node)
2601

    
2602
  if instance.name in ins_l.data:
2603
    raise errors.OpExecError("Instance is running, can't shutdown"
2604
                             " block devices.")
2605

    
2606
  _ShutdownInstanceDisks(lu, instance)
2607

    
2608

    
2609
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
2610
  """Shutdown block devices of an instance.
2611

2612
  This does the shutdown on all nodes of the instance.
2613

2614
  If the ignore_primary is false, errors on the primary node are
2615
  ignored.
2616

2617
  """
2618
  all_result = True
2619
  for disk in instance.disks:
2620
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2621
      lu.cfg.SetDiskID(top_disk, node)
2622
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
2623
      msg = result.RemoteFailMsg()
2624
      if msg:
2625
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
2626
                      disk.iv_name, node, msg)
2627
        if not ignore_primary or node != instance.primary_node:
2628
          all_result = False
2629
  return all_result
2630

    
2631

    
2632
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
2633
  """Checks if a node has enough free memory.
2634

2635
  This function checks if a given node has the needed amount of free
2636
  memory. In case the node has less memory or we cannot get the
2637
  information from the node, this function raises an OpPrereqError
2638
  exception.
2639

2640
  @type lu: C{LogicalUnit}
2641
  @param lu: a logical unit from which we get configuration data
2642
  @type node: C{str}
2643
  @param node: the node to check
2644
  @type reason: C{str}
2645
  @param reason: string to use in the error message
2646
  @type requested: C{int}
2647
  @param requested: the amount of memory in MiB to check for
2648
  @type hypervisor_name: C{str}
2649
  @param hypervisor_name: the hypervisor to ask for memory stats
2650
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2651
      we cannot check the node
2652

2653
  """
2654
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
2655
  nodeinfo[node].Raise()
2656
  free_mem = nodeinfo[node].data.get('memory_free')
2657
  if not isinstance(free_mem, int):
2658
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2659
                             " was '%s'" % (node, free_mem))
2660
  if requested > free_mem:
2661
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2662
                             " needed %s MiB, available %s MiB" %
2663
                             (node, reason, requested, free_mem))
2664

    
2665

    
2666
class LUStartupInstance(LogicalUnit):
2667
  """Starts an instance.
2668

2669
  """
2670
  HPATH = "instance-start"
2671
  HTYPE = constants.HTYPE_INSTANCE
2672
  _OP_REQP = ["instance_name", "force"]
2673
  REQ_BGL = False
2674

    
2675
  def ExpandNames(self):
2676
    self._ExpandAndLockInstance()
2677

    
2678
  def BuildHooksEnv(self):
2679
    """Build hooks env.
2680

2681
    This runs on master, primary and secondary nodes of the instance.
2682

2683
    """
2684
    env = {
2685
      "FORCE": self.op.force,
2686
      }
2687
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2688
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2689
    return env, nl, nl
2690

    
2691
  def CheckPrereq(self):
2692
    """Check prerequisites.
2693

2694
    This checks that the instance is in the cluster.
2695

2696
    """
2697
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2698
    assert self.instance is not None, \
2699
      "Cannot retrieve locked instance %s" % self.op.instance_name
2700

    
2701
    _CheckNodeOnline(self, instance.primary_node)
2702

    
2703
    bep = self.cfg.GetClusterInfo().FillBE(instance)
2704
    # check bridges existence
2705
    _CheckInstanceBridgesExist(self, instance)
2706

    
2707
    _CheckNodeFreeMemory(self, instance.primary_node,
2708
                         "starting instance %s" % instance.name,
2709
                         bep[constants.BE_MEMORY], instance.hypervisor)
2710

    
2711
  def Exec(self, feedback_fn):
2712
    """Start the instance.
2713

2714
    """
2715
    instance = self.instance
2716
    force = self.op.force
2717
    extra_args = getattr(self.op, "extra_args", "")
2718

    
2719
    self.cfg.MarkInstanceUp(instance.name)
2720

    
2721
    node_current = instance.primary_node
2722

    
2723
    _StartInstanceDisks(self, instance, force)
2724

    
2725
    result = self.rpc.call_instance_start(node_current, instance, extra_args)
2726
    msg = result.RemoteFailMsg()
2727
    if msg:
2728
      _ShutdownInstanceDisks(self, instance)
2729
      raise errors.OpExecError("Could not start instance: %s" % msg)
2730

    
2731

    
2732
class LURebootInstance(LogicalUnit):
2733
  """Reboot an instance.
2734

2735
  """
2736
  HPATH = "instance-reboot"
2737
  HTYPE = constants.HTYPE_INSTANCE
2738
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2739
  REQ_BGL = False
2740

    
2741
  def ExpandNames(self):
2742
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2743
                                   constants.INSTANCE_REBOOT_HARD,
2744
                                   constants.INSTANCE_REBOOT_FULL]:
2745
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2746
                                  (constants.INSTANCE_REBOOT_SOFT,
2747
                                   constants.INSTANCE_REBOOT_HARD,
2748
                                   constants.INSTANCE_REBOOT_FULL))
2749
    self._ExpandAndLockInstance()
2750

    
2751
  def BuildHooksEnv(self):
2752
    """Build hooks env.
2753

2754
    This runs on master, primary and secondary nodes of the instance.
2755

2756
    """
2757
    env = {
2758
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2759
      }
2760
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2761
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2762
    return env, nl, nl
2763

    
2764
  def CheckPrereq(self):
2765
    """Check prerequisites.
2766

2767
    This checks that the instance is in the cluster.
2768

2769
    """
2770
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2771
    assert self.instance is not None, \
2772
      "Cannot retrieve locked instance %s" % self.op.instance_name
2773

    
2774
    _CheckNodeOnline(self, instance.primary_node)
2775

    
2776
    # check bridges existance
2777
    _CheckInstanceBridgesExist(self, instance)
2778

    
2779
  def Exec(self, feedback_fn):
2780
    """Reboot the instance.
2781

2782
    """
2783
    instance = self.instance
2784
    ignore_secondaries = self.op.ignore_secondaries
2785
    reboot_type = self.op.reboot_type
2786
    extra_args = getattr(self.op, "extra_args", "")
2787

    
2788
    node_current = instance.primary_node
2789

    
2790
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2791
                       constants.INSTANCE_REBOOT_HARD]:
2792
      result = self.rpc.call_instance_reboot(node_current, instance,
2793
                                             reboot_type, extra_args)
2794
      if result.failed or not result.data:
2795
        raise errors.OpExecError("Could not reboot instance")
2796
    else:
2797
      result = self.rpc.call_instance_shutdown(node_current, instance)
      if result.failed or not result.data:
2798
        raise errors.OpExecError("could not shutdown instance for full reboot")
2799
      _ShutdownInstanceDisks(self, instance)
2800
      _StartInstanceDisks(self, instance, ignore_secondaries)
2801
      result = self.rpc.call_instance_start(node_current, instance, extra_args)
2802
      msg = result.RemoteFailMsg()
2803
      if msg:
2804
        _ShutdownInstanceDisks(self, instance)
2805
        raise errors.OpExecError("Could not start instance for"
2806
                                 " full reboot: %s" % msg)
2807

    
2808
    self.cfg.MarkInstanceUp(instance.name)
2809

    
2810

    
2811
class LUShutdownInstance(LogicalUnit):
2812
  """Shutdown an instance.
2813

2814
  """
2815
  HPATH = "instance-stop"
2816
  HTYPE = constants.HTYPE_INSTANCE
2817
  _OP_REQP = ["instance_name"]
2818
  REQ_BGL = False
2819

    
2820
  def ExpandNames(self):
2821
    self._ExpandAndLockInstance()
2822

    
2823
  def BuildHooksEnv(self):
2824
    """Build hooks env.
2825

2826
    This runs on master, primary and secondary nodes of the instance.
2827

2828
    """
2829
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2830
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2831
    return env, nl, nl
2832

    
2833
  def CheckPrereq(self):
2834
    """Check prerequisites.
2835

2836
    This checks that the instance is in the cluster.
2837

2838
    """
2839
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2840
    assert self.instance is not None, \
2841
      "Cannot retrieve locked instance %s" % self.op.instance_name
2842
    _CheckNodeOnline(self, self.instance.primary_node)
2843

    
2844
  def Exec(self, feedback_fn):
2845
    """Shutdown the instance.
2846

2847
    """
2848
    instance = self.instance
2849
    node_current = instance.primary_node
2850
    self.cfg.MarkInstanceDown(instance.name)
2851
    result = self.rpc.call_instance_shutdown(node_current, instance)
2852
    if result.failed or not result.data:
2853
      self.proc.LogWarning("Could not shutdown instance")
2854

    
2855
    _ShutdownInstanceDisks(self, instance)
2856

    
2857

    
2858
class LUReinstallInstance(LogicalUnit):
2859
  """Reinstall an instance.
2860

2861
  """
2862
  HPATH = "instance-reinstall"
2863
  HTYPE = constants.HTYPE_INSTANCE
2864
  _OP_REQP = ["instance_name"]
2865
  REQ_BGL = False
2866

    
2867
  def ExpandNames(self):
2868
    self._ExpandAndLockInstance()
2869

    
2870
  def BuildHooksEnv(self):
2871
    """Build hooks env.
2872

2873
    This runs on master, primary and secondary nodes of the instance.
2874

2875
    """
2876
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2877
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2878
    return env, nl, nl
2879

    
2880
  def CheckPrereq(self):
2881
    """Check prerequisites.
2882

2883
    This checks that the instance is in the cluster and is not running.
2884

2885
    """
2886
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2887
    assert instance is not None, \
2888
      "Cannot retrieve locked instance %s" % self.op.instance_name
2889
    _CheckNodeOnline(self, instance.primary_node)
2890

    
2891
    if instance.disk_template == constants.DT_DISKLESS:
2892
      raise errors.OpPrereqError("Instance '%s' has no disks" %
2893
                                 self.op.instance_name)
2894
    if instance.admin_up:
2895
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2896
                                 self.op.instance_name)
2897
    remote_info = self.rpc.call_instance_info(instance.primary_node,
2898
                                              instance.name,
2899
                                              instance.hypervisor)
2900
    if remote_info.failed or remote_info.data:
2901
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2902
                                 (self.op.instance_name,
2903
                                  instance.primary_node))
2904

    
2905
    self.op.os_type = getattr(self.op, "os_type", None)
2906
    if self.op.os_type is not None:
2907
      # OS verification
2908
      pnode = self.cfg.GetNodeInfo(
2909
        self.cfg.ExpandNodeName(instance.primary_node))
2910
      if pnode is None:
2911
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
2912
                                   self.op.pnode)
2913
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
2914
      result.Raise()
2915
      if not isinstance(result.data, objects.OS):
2916
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2917
                                   " primary node"  % self.op.os_type)
2918

    
2919
    self.instance = instance
2920

    
2921
  def Exec(self, feedback_fn):
2922
    """Reinstall the instance.
2923

2924
    """
2925
    inst = self.instance
2926

    
2927
    if self.op.os_type is not None:
2928
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2929
      inst.os = self.op.os_type
2930
      self.cfg.Update(inst)
2931

    
2932
    _StartInstanceDisks(self, inst, None)
2933
    try:
2934
      feedback_fn("Running the instance OS create scripts...")
2935
      result = self.rpc.call_instance_os_add(inst.primary_node, inst)
2936
      msg = result.RemoteFailMsg()
2937
      if msg:
2938
        raise errors.OpExecError("Could not install OS for instance %s"
2939
                                 " on node %s: %s" %
2940
                                 (inst.name, inst.primary_node, msg))
2941
    finally:
2942
      _ShutdownInstanceDisks(self, inst)
2943
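# Note: the reinstall above uses the same disk handling pattern as the other
# OS-level operations in this module (see LURenameInstance.Exec below):
# activate the instance's disks, run the OS script on the primary node via
# RPC, and always deactivate the disks again, even on failure. A condensed
# sketch, using the helpers defined elsewhere in this file:
#
#   _StartInstanceDisks(self, inst, None)
#   try:
#     result = self.rpc.call_instance_os_add(inst.primary_node, inst)
#     # inspect result.RemoteFailMsg() and raise OpExecError on failure
#   finally:
#     _ShutdownInstanceDisks(self, inst)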

    
2944

    
2945
class LURenameInstance(LogicalUnit):
2946
  """Rename an instance.
2947

2948
  """
2949
  HPATH = "instance-rename"
2950
  HTYPE = constants.HTYPE_INSTANCE
2951
  _OP_REQP = ["instance_name", "new_name"]
2952

    
2953
  def BuildHooksEnv(self):
2954
    """Build hooks env.
2955

2956
    This runs on master, primary and secondary nodes of the instance.
2957

2958
    """
2959
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2960
    env["INSTANCE_NEW_NAME"] = self.op.new_name
2961
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2962
    return env, nl, nl
2963

    
2964
  def CheckPrereq(self):
2965
    """Check prerequisites.
2966

2967
    This checks that the instance is in the cluster and is not running.
2968

2969
    """
2970
    instance = self.cfg.GetInstanceInfo(
2971
      self.cfg.ExpandInstanceName(self.op.instance_name))
2972
    if instance is None:
2973
      raise errors.OpPrereqError("Instance '%s' not known" %
2974
                                 self.op.instance_name)
2975
    _CheckNodeOnline(self, instance.primary_node)
2976

    
2977
    if instance.admin_up:
2978
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2979
                                 self.op.instance_name)
2980
    remote_info = self.rpc.call_instance_info(instance.primary_node,
2981
                                              instance.name,
2982
                                              instance.hypervisor)
2983
    remote_info.Raise()
2984
    if remote_info.data:
2985
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2986
                                 (self.op.instance_name,
2987
                                  instance.primary_node))
2988
    self.instance = instance
2989

    
2990
    # new name verification
2991
    name_info = utils.HostInfo(self.op.new_name)
2992

    
2993
    self.op.new_name = new_name = name_info.name
2994
    instance_list = self.cfg.GetInstanceList()
2995
    if new_name in instance_list:
2996
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2997
                                 new_name)
2998

    
2999
    if not getattr(self.op, "ignore_ip", False):
3000
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
3001
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3002
                                   (name_info.ip, new_name))
3003

    
3004

    
3005
  def Exec(self, feedback_fn):
3006
    """Reinstall the instance.
3007

3008
    """
3009
    inst = self.instance
3010
    old_name = inst.name
3011

    
3012
    if inst.disk_template == constants.DT_FILE:
3013
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
3014

    
3015
    self.cfg.RenameInstance(inst.name, self.op.new_name)
3016
    # Change the instance lock. This is definitely safe while we hold the BGL
3017
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
3018
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
3019

    
3020
    # re-read the instance from the configuration after rename
3021
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
3022

    
3023
    if inst.disk_template == constants.DT_FILE:
3024
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
3025
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
3026
                                                     old_file_storage_dir,
3027
                                                     new_file_storage_dir)
3028
      result.Raise()
3029
      if not result.data:
3030
        raise errors.OpExecError("Could not connect to node '%s' to rename"
3031
                                 " directory '%s' to '%s' (but the instance"
3032
                                 " has been renamed in Ganeti)" % (
3033
                                 inst.primary_node, old_file_storage_dir,
3034
                                 new_file_storage_dir))
3035

    
3036
      if not result.data[0]:
3037
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
3038
                                 " (but the instance has been renamed in"
3039
                                 " Ganeti)" % (old_file_storage_dir,
3040
                                               new_file_storage_dir))
3041

    
3042
    _StartInstanceDisks(self, inst, None)
3043
    try:
3044
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
3045
                                                 old_name)
3046
      msg = result.RemoteFailMsg()
3047
      if msg:
3048
        msg = ("Could not run OS rename script for instance %s on node %s"
3049
               " (but the instance has been renamed in Ganeti): %s" %
3050
               (inst.name, inst.primary_node, msg))
3051
        self.proc.LogWarning(msg)
3052
    finally:
3053
      _ShutdownInstanceDisks(self, inst)
3054

    
3055

    
3056
class LURemoveInstance(LogicalUnit):
3057
  """Remove an instance.
3058

3059
  """
3060
  HPATH = "instance-remove"
3061
  HTYPE = constants.HTYPE_INSTANCE
3062
  _OP_REQP = ["instance_name", "ignore_failures"]
3063
  REQ_BGL = False
3064

    
3065
  def ExpandNames(self):
3066
    self._ExpandAndLockInstance()
3067
    self.needed_locks[locking.LEVEL_NODE] = []
3068
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3069

    
3070
  def DeclareLocks(self, level):
3071
    if level == locking.LEVEL_NODE:
3072
      self._LockInstancesNodes()
3073

    
3074
  def BuildHooksEnv(self):
3075
    """Build hooks env.
3076

3077
    This runs on master, primary and secondary nodes of the instance.
3078

3079
    """
3080
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3081
    nl = [self.cfg.GetMasterNode()]
3082
    return env, nl, nl
3083

    
3084
  def CheckPrereq(self):
3085
    """Check prerequisites.
3086

3087
    This checks that the instance is in the cluster.
3088

3089
    """
3090
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3091
    assert self.instance is not None, \
3092
      "Cannot retrieve locked instance %s" % self.op.instance_name
3093

    
3094
  def Exec(self, feedback_fn):
3095
    """Remove the instance.
3096

3097
    """
3098
    instance = self.instance
3099
    logging.info("Shutting down instance %s on node %s",
3100
                 instance.name, instance.primary_node)
3101

    
3102
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
3103
    if result.failed or not result.data:
3104
      if self.op.ignore_failures:
3105
        feedback_fn("Warning: can't shutdown instance")
3106
      else:
3107
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
3108
                                 (instance.name, instance.primary_node))
3109

    
3110
    logging.info("Removing block devices for instance %s", instance.name)
3111

    
3112
    if not _RemoveDisks(self, instance):
3113
      if self.op.ignore_failures:
3114
        feedback_fn("Warning: can't remove instance's disks")
3115
      else:
3116
        raise errors.OpExecError("Can't remove instance's disks")
3117

    
3118
    logging.info("Removing instance %s out of cluster config", instance.name)
3119

    
3120
    self.cfg.RemoveInstance(instance.name)
3121
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
3122
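# Opcode-level usage sketch (illustrative; it assumes the matching opcode
# class in opcodes.py is named OpRemoveInstance and carries exactly the
# fields required by _OP_REQP above):
#
#   op = opcodes.OpRemoveInstance(instance_name="inst1.example.com",
#                                 ignore_failures=True)
#
# With ignore_failures set, shutdown and disk removal problems are only
# reported through feedback_fn and the instance is still dropped from the
# configuration; without it, the first failure aborts with OpExecError.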

    
3123

    
3124
class LUQueryInstances(NoHooksLU):
3125
  """Logical unit for querying instances.
3126

3127
  """
3128
  _OP_REQP = ["output_fields", "names", "use_locking"]
3129
  REQ_BGL = False
3130
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
3131
                                    "admin_state", "admin_ram",
3132
                                    "disk_template", "ip", "mac", "bridge",
3133
                                    "sda_size", "sdb_size", "vcpus", "tags",
3134
                                    "network_port", "beparams",
3135
                                    "(disk).(size)/([0-9]+)",
3136
                                    "(disk).(sizes)", "disk_usage",
3137
                                    "(nic).(mac|ip|bridge)/([0-9]+)",
3138
                                    "(nic).(macs|ips|bridges)",
3139
                                    "(disk|nic).(count)",
3140
                                    "serial_no", "hypervisor", "hvparams",] +
3141
                                  ["hv/%s" % name
3142
                                   for name in constants.HVS_PARAMETERS] +
3143
                                  ["be/%s" % name
3144
                                   for name in constants.BES_PARAMETERS])
3145
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
3146
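  # The parenthesized entries in _FIELDS_STATIC above are regular expression
  # patterns; they are resolved by the st_match.groups() branch in Exec
  # below. Illustrative field names they accept:
  #
  #   "disk.count"    -> number of disks
  #   "disk.sizes"    -> list of all disk sizes
  #   "disk.size/0"   -> size of the first disk (None for an invalid index)
  #   "nic.macs"      -> list of all NIC MAC addresses
  #   "nic.bridge/1"  -> bridge of the second NIC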

    
3147

    
3148
  def ExpandNames(self):
3149
    _CheckOutputFields(static=self._FIELDS_STATIC,
3150
                       dynamic=self._FIELDS_DYNAMIC,
3151
                       selected=self.op.output_fields)
3152

    
3153
    self.needed_locks = {}
3154
    self.share_locks[locking.LEVEL_INSTANCE] = 1
3155
    self.share_locks[locking.LEVEL_NODE] = 1
3156

    
3157
    if self.op.names:
3158
      self.wanted = _GetWantedInstances(self, self.op.names)
3159
    else:
3160
      self.wanted = locking.ALL_SET
3161

    
3162
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
3163
    self.do_locking = self.do_node_query and self.op.use_locking
3164
    if self.do_locking:
3165
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
3166
      self.needed_locks[locking.LEVEL_NODE] = []
3167
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3168

    
3169
  def DeclareLocks(self, level):
3170
    if level == locking.LEVEL_NODE and self.do_locking:
3171
      self._LockInstancesNodes()
3172

    
3173
  def CheckPrereq(self):
3174
    """Check prerequisites.
3175

3176
    """
3177
    pass
3178

    
3179
  def Exec(self, feedback_fn):
3180
    """Computes the list of nodes and their attributes.
3181

3182
    """
3183
    all_info = self.cfg.GetAllInstancesInfo()
3184
    if self.wanted == locking.ALL_SET:
3185
      # caller didn't specify instance names, so ordering is not important
3186
      if self.do_locking:
3187
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
3188
      else:
3189
        instance_names = all_info.keys()
3190
      instance_names = utils.NiceSort(instance_names)
3191
    else:
3192
      # caller did specify names, so we must keep the ordering
3193
      if self.do_locking:
3194
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
3195
      else:
3196
        tgt_set = all_info.keys()
3197
      missing = set(self.wanted).difference(tgt_set)
3198
      if missing:
3199
        raise errors.OpExecError("Some instances were removed before"
3200
                                 " retrieving their data: %s" % missing)
3201
      instance_names = self.wanted
3202

    
3203
    instance_list = [all_info[iname] for iname in instance_names]
3204

    
3205
    # begin data gathering
3206

    
3207
    nodes = frozenset([inst.primary_node for inst in instance_list])
3208
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
3209

    
3210
    bad_nodes = []
3211
    off_nodes = []
3212
    if self.do_node_query:
3213
      live_data = {}
3214
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
3215
      for name in nodes:
3216
        result = node_data[name]
3217
        if result.offline:
3218
          # offline nodes will be in both lists
3219
          off_nodes.append(name)
3220
        if result.failed:
3221
          bad_nodes.append(name)
3222
        else:
3223
          if result.data:
3224
            live_data.update(result.data)
3225
            # else no instance is alive
3226
    else:
3227
      live_data = dict([(name, {}) for name in instance_names])
3228

    
3229
    # end data gathering
3230

    
3231
    HVPREFIX = "hv/"
3232
    BEPREFIX = "be/"
3233
    output = []
3234
    for instance in instance_list:
3235
      iout = []
3236
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
3237
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
3238
      for field in self.op.output_fields:
3239
        st_match = self._FIELDS_STATIC.Matches(field)
3240
        if field == "name":
3241
          val = instance.name
3242
        elif field == "os":
3243
          val = instance.os
3244
        elif field == "pnode":
3245
          val = instance.primary_node
3246
        elif field == "snodes":
3247
          val = list(instance.secondary_nodes)
3248
        elif field == "admin_state":
3249
          val = instance.admin_up
3250
        elif field == "oper_state":
3251
          if instance.primary_node in bad_nodes:
3252
            val = None
3253
          else:
3254
            val = bool(live_data.get(instance.name))
3255
        elif field == "status":
3256
          if instance.primary_node in off_nodes:
3257
            val = "ERROR_nodeoffline"
3258
          elif instance.primary_node in bad_nodes:
3259
            val = "ERROR_nodedown"
3260
          else:
3261
            running = bool(live_data.get(instance.name))
3262
            if running:
3263
              if instance.admin_up:
3264
                val = "running"
3265
              else:
3266
                val = "ERROR_up"
3267
            else:
3268
              if instance.admin_up:
3269
                val = "ERROR_down"
3270
              else:
3271
                val = "ADMIN_down"
3272
        elif field == "oper_ram":
3273
          if instance.primary_node in bad_nodes:
3274
            val = None
3275
          elif instance.name in live_data:
3276
            val = live_data[instance.name].get("memory", "?")
3277
          else:
3278
            val = "-"
3279
        elif field == "disk_template":
3280
          val = instance.disk_template
3281
        elif field == "ip":
3282
          val = instance.nics[0].ip
3283
        elif field == "bridge":
3284
          val = instance.nics[0].bridge
3285
        elif field == "mac":
3286
          val = instance.nics[0].mac
3287
        elif field == "sda_size" or field == "sdb_size":
3288
          idx = ord(field[2]) - ord('a')
3289
          try:
3290
            val = instance.FindDisk(idx).size
3291
          except errors.OpPrereqError:
3292
            val = None
3293
        elif field == "disk_usage": # total disk usage per node
3294
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
3295
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
3296
        elif field == "tags":
3297
          val = list(instance.GetTags())
3298
        elif field == "serial_no":
3299
          val = instance.serial_no
3300
        elif field == "network_port":
3301
          val = instance.network_port
3302
        elif field == "hypervisor":
3303
          val = instance.hypervisor
3304
        elif field == "hvparams":
3305
          val = i_hv
3306
        elif (field.startswith(HVPREFIX) and
3307
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
3308
          val = i_hv.get(field[len(HVPREFIX):], None)
3309
        elif field == "beparams":
3310
          val = i_be
3311
        elif (field.startswith(BEPREFIX) and
3312
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
3313
          val = i_be.get(field[len(BEPREFIX):], None)
3314
        elif st_match and st_match.groups():
3315
          # matches a variable list
3316
          st_groups = st_match.groups()
3317
          if st_groups and st_groups[0] == "disk":
3318
            if st_groups[1] == "count":
3319
              val = len(instance.disks)
3320
            elif st_groups[1] == "sizes":
3321
              val = [disk.size for disk in instance.disks]
3322
            elif st_groups[1] == "size":
3323
              try:
3324
                val = instance.FindDisk(st_groups[2]).size
3325
              except errors.OpPrereqError:
3326
                val = None
3327
            else:
3328
              assert False, "Unhandled disk parameter"
3329
          elif st_groups[0] == "nic":
3330
            if st_groups[1] == "count":
3331
              val = len(instance.nics)
3332
            elif st_groups[1] == "macs":
3333
              val = [nic.mac for nic in instance.nics]
3334
            elif st_groups[1] == "ips":
3335
              val = [nic.ip for nic in instance.nics]
3336
            elif st_groups[1] == "bridges":
3337
              val = [nic.bridge for nic in instance.nics]
3338
            else:
3339
              # index-based item
3340
              nic_idx = int(st_groups[2])
3341
              if nic_idx >= len(instance.nics):
3342
                val = None
3343
              else:
3344
                if st_groups[1] == "mac":
3345
                  val = instance.nics[nic_idx].mac
3346
                elif st_groups[1] == "ip":
3347
                  val = instance.nics[nic_idx].ip
3348
                elif st_groups[1] == "bridge":
3349
                  val = instance.nics[nic_idx].bridge
3350
                else:
3351
                  assert False, "Unhandled NIC parameter"
3352
          else:
3353
            assert False, "Unhandled variable parameter"
3354
        else:
3355
          raise errors.ParameterError(field)
3356
        iout.append(val)
3357
      output.append(iout)
3358

    
3359
    return output
3360
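# Summary of the "status" field computed above ("running" below means the
# hypervisor on the primary node reports the instance as live):
#
#   primary node offline              -> "ERROR_nodeoffline"
#   primary node unreachable          -> "ERROR_nodedown"
#   admin_up and running              -> "running"
#   admin_up and not running          -> "ERROR_down"
#   not admin_up and running          -> "ERROR_up"
#   not admin_up and not running      -> "ADMIN_down"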

    
3361

    
3362
class LUFailoverInstance(LogicalUnit):
3363
  """Failover an instance.
3364

3365
  """
3366
  HPATH = "instance-failover"
3367
  HTYPE = constants.HTYPE_INSTANCE
3368
  _OP_REQP = ["instance_name", "ignore_consistency"]
3369
  REQ_BGL = False
3370

    
3371
  def ExpandNames(self):
3372
    self._ExpandAndLockInstance()
3373
    self.needed_locks[locking.LEVEL_NODE] = []
3374
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3375

    
3376
  def DeclareLocks(self, level):
3377
    if level == locking.LEVEL_NODE:
3378
      self._LockInstancesNodes()
3379

    
3380
  def BuildHooksEnv(self):
3381
    """Build hooks env.
3382

3383
    This runs on master, primary and secondary nodes of the instance.
3384

3385
    """
3386
    env = {
3387
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
3388
      }
3389
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3390
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3391
    return env, nl, nl
3392

    
3393
  def CheckPrereq(self):
3394
    """Check prerequisites.
3395

3396
    This checks that the instance is in the cluster.
3397

3398
    """
3399
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3400
    assert self.instance is not None, \
3401
      "Cannot retrieve locked instance %s" % self.op.instance_name
3402

    
3403
    bep = self.cfg.GetClusterInfo().FillBE(instance)
3404
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3405
      raise errors.OpPrereqError("Instance's disk layout is not"
3406
                                 " network mirrored, cannot failover.")
3407

    
3408
    secondary_nodes = instance.secondary_nodes
3409
    if not secondary_nodes:
3410
      raise errors.ProgrammerError("no secondary node but using "
3411
                                   "a mirrored disk template")
3412

    
3413
    target_node = secondary_nodes[0]
3414
    _CheckNodeOnline(self, target_node)
3415
    _CheckNodeNotDrained(self, target_node)
3416
    # check memory requirements on the secondary node
3417
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
3418
                         instance.name, bep[constants.BE_MEMORY],
3419
                         instance.hypervisor)
3420

    
3421
    # check bridge existence
3422
    brlist = [nic.bridge for nic in instance.nics]
3423
    result = self.rpc.call_bridges_exist(target_node, brlist)
3424
    result.Raise()
3425
    if not result.data:
3426
      raise errors.OpPrereqError("One or more target bridges %s does not"
3427
                                 " exist on destination node '%s'" %
3428
                                 (brlist, target_node))
3429

    
3430
  def Exec(self, feedback_fn):
3431
    """Failover an instance.
3432

3433
    The failover is done by shutting it down on its present node and
3434
    starting it on the secondary.
3435

3436
    """
3437
    instance = self.instance
3438

    
3439
    source_node = instance.primary_node
3440
    target_node = instance.secondary_nodes[0]
3441

    
3442
    feedback_fn("* checking disk consistency between source and target")
3443
    for dev in instance.disks:
3444
      # for drbd, these are drbd over lvm
3445
      if not _CheckDiskConsistency(self, dev, target_node, False):
3446
        if instance.admin_up and not self.op.ignore_consistency:
3447
          raise errors.OpExecError("Disk %s is degraded on target node,"
3448
                                   " aborting failover." % dev.iv_name)
3449

    
3450
    feedback_fn("* shutting down instance on source node")
3451
    logging.info("Shutting down instance %s on node %s",
3452
                 instance.name, source_node)
3453

    
3454
    result = self.rpc.call_instance_shutdown(source_node, instance)
3455
    if result.failed or not result.data:
3456
      if self.op.ignore_consistency:
3457
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
3458
                             " Proceeding"
3459
                             " anyway. Please make sure node %s is down",
3460
                             instance.name, source_node, source_node)
3461
      else:
3462
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
3463
                                 (instance.name, source_node))
3464

    
3465
    feedback_fn("* deactivating the instance's disks on source node")
3466
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
3467
      raise errors.OpExecError("Can't shut down the instance's disks.")
3468

    
3469
    instance.primary_node = target_node
3470
    # distribute new instance config to the other nodes
3471
    self.cfg.Update(instance)
3472

    
3473
    # Only start the instance if it's marked as up
3474
    if instance.admin_up:
3475
      feedback_fn("* activating the instance's disks on target node")
3476
      logging.info("Starting instance %s on node %s",
3477
                   instance.name, target_node)
3478

    
3479
      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
3480
                                               ignore_secondaries=True)
3481
      if not disks_ok:
3482
        _ShutdownInstanceDisks(self, instance)
3483
        raise errors.OpExecError("Can't activate the instance's disks")
3484

    
3485
      feedback_fn("* starting the instance on the target node")
3486
      result = self.rpc.call_instance_start(target_node, instance, None)
3487
      msg = result.RemoteFailMsg()
3488
      if msg:
3489
        _ShutdownInstanceDisks(self, instance)
3490
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
3491
                                 (instance.name, target_node, msg))
3492

    
3493

    
3494
class LUMigrateInstance(LogicalUnit):
3495
  """Migrate an instance.
3496

3497
  This is migration without shutting down, compared to the failover,
3498
  which is done with shutdown.
3499

3500
  """
3501
  HPATH = "instance-migrate"
3502
  HTYPE = constants.HTYPE_INSTANCE
3503
  _OP_REQP = ["instance_name", "live", "cleanup"]
3504

    
3505
  REQ_BGL = False
3506

    
3507
  def ExpandNames(self):
3508
    self._ExpandAndLockInstance()
3509
    self.needed_locks[locking.LEVEL_NODE] = []
3510
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3511

    
3512
  def DeclareLocks(self, level):
3513
    if level == locking.LEVEL_NODE:
3514
      self._LockInstancesNodes()
3515

    
3516
  def BuildHooksEnv(self):
3517
    """Build hooks env.
3518

3519
    This runs on master, primary and secondary nodes of the instance.
3520

3521
    """
3522
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3523
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3524
    return env, nl, nl
3525

    
3526
  def CheckPrereq(self):
3527
    """Check prerequisites.
3528

3529
    This checks that the instance is in the cluster.
3530

3531
    """
3532
    instance = self.cfg.GetInstanceInfo(
3533
      self.cfg.ExpandInstanceName(self.op.instance_name))
3534
    if instance is None:
3535
      raise errors.OpPrereqError("Instance '%s' not known" %
3536
                                 self.op.instance_name)
3537

    
3538
    if instance.disk_template != constants.DT_DRBD8:
3539
      raise errors.OpPrereqError("Instance's disk layout is not"
3540
                                 " drbd8, cannot migrate.")
3541

    
3542
    secondary_nodes = instance.secondary_nodes
3543
    if not secondary_nodes:
3544
      raise errors.ConfigurationError("No secondary node but using"
3545
                                      " drbd8 disk template")
3546

    
3547
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
3548

    
3549
    target_node = secondary_nodes[0]
3550
    # check memory requirements on the secondary node
3551
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
3552
                         instance.name, i_be[constants.BE_MEMORY],
3553
                         instance.hypervisor)
3554

    
3555
    # check bridge existence
3556
    brlist = [nic.bridge for nic in instance.nics]
3557
    result = self.rpc.call_bridges_exist(target_node, brlist)
3558
    if result.failed or not result.data:
3559
      raise errors.OpPrereqError("One or more target bridges %s does not"
3560
                                 " exist on destination node '%s'" %
3561
                                 (brlist, target_node))
3562

    
3563
    if not self.op.cleanup:
3564
      _CheckNodeNotDrained(self, target_node)
3565
      result = self.rpc.call_instance_migratable(instance.primary_node,
3566
                                                 instance)
3567
      msg = result.RemoteFailMsg()
3568
      if msg:
3569
        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
3570
                                   msg)
3571

    
3572
    self.instance = instance
3573

    
3574
  def _WaitUntilSync(self):
3575
    """Poll with custom rpc for disk sync.
3576

3577
    This uses our own step-based rpc call.
3578

3579
    """
3580
    self.feedback_fn("* wait until resync is done")
3581
    all_done = False
3582
    while not all_done:
3583
      all_done = True
3584
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
3585
                                            self.nodes_ip,
3586
                                            self.instance.disks)
3587
      min_percent = 100
3588
      for node, nres in result.items():
3589
        msg = nres.RemoteFailMsg()
3590
        if msg:
3591
          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
3592
                                   (node, msg))
3593
        node_done, node_percent = nres.payload
3594
        all_done = all_done and node_done
3595
        if node_percent is not None:
3596
          min_percent = min(min_percent, node_percent)
3597
      if not all_done:
3598
        if min_percent < 100:
3599
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
3600
        time.sleep(2)
3601

    
3602
  def _EnsureSecondary(self, node):
3603
    """Demote a node to secondary.
3604

3605
    """
3606
    self.feedback_fn("* switching node %s to secondary mode" % node)
3607

    
3608
    for dev in self.instance.disks:
3609
      self.cfg.SetDiskID(dev, node)
3610

    
3611
    result = self.rpc.call_blockdev_close(node, self.instance.name,
3612
                                          self.instance.disks)
3613
    msg = result.RemoteFailMsg()
3614
    if msg:
3615
      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
3616
                               " error %s" % (node, msg))
3617

    
3618
  def _GoStandalone(self):
3619
    """Disconnect from the network.
3620

3621
    """
3622
    self.feedback_fn("* changing into standalone mode")
3623
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
3624
                                               self.instance.disks)
3625
    for node, nres in result.items():
3626
      msg = nres.RemoteFailMsg()
3627
      if msg:
3628
        raise errors.OpExecError("Cannot disconnect disks node %s,"
3629
                                 " error %s" % (node, msg))
3630

    
3631
  def _GoReconnect(self, multimaster):
3632
    """Reconnect to the network.
3633

3634
    """
3635
    if multimaster:
3636
      msg = "dual-master"
3637
    else:
3638
      msg = "single-master"
3639
    self.feedback_fn("* changing disks into %s mode" % msg)
3640
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
3641
                                           self.instance.disks,
3642
                                           self.instance.name, multimaster)
3643
    for node, nres in result.items():
3644
      msg = nres.RemoteFailMsg()
3645
      if msg:
3646
        raise errors.OpExecError("Cannot change disks config on node %s,"
3647
                                 " error: %s" % (node, msg))
3648

    
3649
  def _ExecCleanup(self):
3650
    """Try to cleanup after a failed migration.
3651

3652
    The cleanup is done by:
3653
      - check that the instance is running only on one node
3654
        (and update the config if needed)
3655
      - change disks on its secondary node to secondary
3656
      - wait until disks are fully synchronized
3657
      - disconnect from the network
3658
      - change disks into single-master mode
3659
      - wait again until disks are fully synchronized
3660

3661
    """
3662
    instance = self.instance
3663
    target_node = self.target_node
3664
    source_node = self.source_node
3665

    
3666
    # check running on only one node
3667
    self.feedback_fn("* checking where the instance actually runs"
3668
                     " (if this hangs, the hypervisor might be in"
3669
                     " a bad state)")
3670
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
3671
    for node, result in ins_l.items():
3672
      result.Raise()
3673
      if not isinstance(result.data, list):
3674
        raise errors.OpExecError("Can't contact node '%s'" % node)
3675

    
3676
    runningon_source = instance.name in ins_l[source_node].data
3677
    runningon_target = instance.name in ins_l[target_node].data
3678

    
3679
    if runningon_source and runningon_target:
3680
      raise errors.OpExecError("Instance seems to be running on two nodes,"
3681
                               " or the hypervisor is confused. You will have"
3682
                               " to ensure manually that it runs only on one"
3683
                               " and restart this operation.")
3684

    
3685
    if not (runningon_source or runningon_target):
3686
      raise errors.OpExecError("Instance does not seem to be running at all."
3687
                               " In this case, it's safer to repair by"
3688
                               " running 'gnt-instance stop' to ensure disk"
3689
                               " shutdown, and then restarting it.")
3690

    
3691
    if runningon_target:
3692
      # the migration has actually succeeded, we need to update the config
3693
      self.feedback_fn("* instance running on secondary node (%s),"
3694
                       " updating config" % target_node)
3695
      instance.primary_node = target_node
3696
      self.cfg.Update(instance)
3697
      demoted_node = source_node
3698
    else:
3699
      self.feedback_fn("* instance confirmed to be running on its"
3700
                       " primary node (%s)" % source_node)
3701
      demoted_node = target_node
3702

    
3703
    self._EnsureSecondary(demoted_node)
3704
    try:
3705
      self._WaitUntilSync()
3706
    except errors.OpExecError:
3707
      # we ignore here errors, since if the device is standalone, it
3708
      # won't be able to sync
3709
      pass
3710
    self._GoStandalone()
3711
    self._GoReconnect(False)
3712
    self._WaitUntilSync()
3713

    
3714
    self.feedback_fn("* done")
3715

    
3716
  def _RevertDiskStatus(self):
3717
    """Try to revert the disk status after a failed migration.
3718

3719
    """
3720
    target_node = self.target_node
3721
    try:
3722
      self._EnsureSecondary(target_node)
3723
      self._GoStandalone()
3724
      self._GoReconnect(False)
3725
      self._WaitUntilSync()
3726
    except errors.OpExecError, err:
3727
      self.LogWarning("Migration failed and I can't reconnect the"
3728
                      " drives: error '%s'\n"
3729
                      "Please look and recover the instance status" %
3730
                      str(err))
3731

    
3732
  def _AbortMigration(self):
3733
    """Call the hypervisor code to abort a started migration.
3734

3735
    """
3736
    instance = self.instance
3737
    target_node = self.target_node
3738
    migration_info = self.migration_info
3739

    
3740
    abort_result = self.rpc.call_finalize_migration(target_node,
3741
                                                    instance,
3742
                                                    migration_info,
3743
                                                    False)
3744
    abort_msg = abort_result.RemoteFailMsg()
3745
    if abort_msg:
3746
      logging.error("Aborting migration failed on target node %s: %s" %
3747
                    (target_node, abort_msg))
3748
      # Don't raise an exception here, as we still have to try to revert the
3749
      # disk status, even if this step failed.
3750

    
3751
  def _ExecMigration(self):
3752
    """Migrate an instance.
3753

3754
    The migrate is done by:
3755
      - change the disks into dual-master mode
3756
      - wait until disks are fully synchronized again
3757
      - migrate the instance
3758
      - change disks on the new secondary node (the old primary) to secondary
3759
      - wait until disks are fully synchronized
3760
      - change disks into single-master mode
3761

3762
    """
3763
    instance = self.instance
3764
    target_node = self.target_node
3765
    source_node = self.source_node
3766

    
3767
    self.feedback_fn("* checking disk consistency between source and target")
3768
    for dev in instance.disks:
3769
      if not _CheckDiskConsistency(self, dev, target_node, False):
3770
        raise errors.OpExecError("Disk %s is degraded or not fully"
3771
                                 " synchronized on target node,"
3772
                                 " aborting migrate." % dev.iv_name)
3773

    
3774
    # First get the migration information from the remote node
3775
    result = self.rpc.call_migration_info(source_node, instance)
3776
    msg = result.RemoteFailMsg()
3777
    if msg:
3778
      log_err = ("Failed fetching source migration information from %s: %s" %
3779
                 (source_node, msg))
3780
      logging.error(log_err)
3781
      raise errors.OpExecError(log_err)
3782

    
3783
    self.migration_info = migration_info = result.payload
3784

    
3785
    # Then switch the disks to master/master mode
3786
    self._EnsureSecondary(target_node)
3787
    self._GoStandalone()
3788
    self._GoReconnect(True)
3789
    self._WaitUntilSync()
3790

    
3791
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
3792
    result = self.rpc.call_accept_instance(target_node,
3793
                                           instance,
3794
                                           migration_info,
3795
                                           self.nodes_ip[target_node])
3796

    
3797
    msg = result.RemoteFailMsg()
3798
    if msg:
3799
      logging.error("Instance pre-migration failed, trying to revert"
3800
                    " disk status: %s", msg)
3801
      self._AbortMigration()
3802
      self._RevertDiskStatus()
3803
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
3804
                               (instance.name, msg))
3805

    
3806
    self.feedback_fn("* migrating instance to %s" % target_node)
3807
    result = self.rpc.call_instance_migrate(source_node, instance,
3809
                                            self.nodes_ip[target_node],
3810
                                            self.op.live)
3811
    msg = result.RemoteFailMsg()
3812
    if msg:
3813
      logging.error("Instance migration failed, trying to revert"
3814
                    " disk status: %s", msg)
3815
      self._AbortMigration()
3816
      self._RevertDiskStatus()
3817
      raise errors.OpExecError("Could not migrate instance %s: %s" %
3818
                               (instance.name, msg))
3819

    
3821
    instance.primary_node = target_node
3822
    # distribute new instance config to the other nodes
3823
    self.cfg.Update(instance)
3824

    
3825
    result = self.rpc.call_finalize_migration(target_node,
3826
                                              instance,
3827
                                              migration_info,
3828
                                              True)
3829
    msg = result.RemoteFailMsg()
3830
    if msg:
3831
      logging.error("Instance migration succeeded, but finalization failed:"
3832
                    " %s" % msg)
3833
      raise errors.OpExecError("Could not finalize instance migration: %s" %
3834
                               msg)
3835

    
3836
    self._EnsureSecondary(source_node)
3837
    self._WaitUntilSync()
3838
    self._GoStandalone()
3839
    self._GoReconnect(False)
3840
    self._WaitUntilSync()
3841

    
3842
    self.feedback_fn("* done")
3843

    
3844
  def Exec(self, feedback_fn):
3845
    """Perform the migration.
3846

3847
    """
3848
    self.feedback_fn = feedback_fn
3849

    
3850
    self.source_node = self.instance.primary_node
3851
    self.target_node = self.instance.secondary_nodes[0]
3852
    self.all_nodes = [self.source_node, self.target_node]
3853
    self.nodes_ip = {
3854
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
3855
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
3856
      }
3857
    if self.op.cleanup:
3858
      return self._ExecCleanup()
3859
    else:
3860
      return self._ExecMigration()
3861
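# Condensed view of the DRBD role changes driven by LUMigrateInstance above
# during a successful live migration (the helpers are the methods of the
# class above):
#
#   self._EnsureSecondary(target_node)  # close the devices on the secondary
#   self._GoStandalone()                # disconnect both sides
#   self._GoReconnect(True)             # reconnect in dual-master mode
#   self._WaitUntilSync()               # wait for the resync to finish
#   ... the hypervisor migrates the instance, the config is updated ...
#   self._EnsureSecondary(source_node)  # demote the old primary
#   self._WaitUntilSync()
#   self._GoStandalone()
#   self._GoReconnect(False)            # back to single-master mode
#   self._WaitUntilSync()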

    
3862

    
3863
def _CreateBlockDev(lu, node, instance, device, force_create,
3864
                    info, force_open):
3865
  """Create a tree of block devices on a given node.
3866

3867
  If this device type has to be created on secondaries, create it and
3868
  all its children.
3869

3870
  If not, just recurse to children keeping the same 'force' value.
3871

3872
  @param lu: the lu on whose behalf we execute
3873
  @param node: the node on which to create the device
3874
  @type instance: L{objects.Instance}
3875
  @param instance: the instance which owns the device
3876
  @type device: L{objects.Disk}
3877
  @param device: the device to create
3878
  @type force_create: boolean
3879
  @param force_create: whether to force creation of this device; this
3880
      will be changed to True whenever we find a device which has
3881
      CreateOnSecondary() attribute
3882
  @param info: the extra 'metadata' we should attach to the device
3883
      (this will be represented as a LVM tag)
3884
  @type force_open: boolean
3885
  @param force_open: this parameter will be passed to the
3886
      L{backend.BlockdevCreate} function where it specifies
3887
      whether we run on primary or not, and it affects both
3888
      the child assembly and the device's own Open() execution
3889

3890
  """
3891
  if device.CreateOnSecondary():
3892
    force_create = True
3893

    
3894
  if device.children:
3895
    for child in device.children:
3896
      _CreateBlockDev(lu, node, instance, child, force_create,
3897
                      info, force_open)
3898

    
3899
  if not force_create:
3900
    return
3901

    
3902
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
3903
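# Minimal standalone sketch of the recursion implemented by _CreateBlockDev
# above (illustrative only: 'dev' stands in for an objects.Disk and
# 'create_fn' for _CreateSingleBlockDev):
#
#   def _create_tree(dev, force_create, create_fn):
#     if dev.CreateOnSecondary():
#       force_create = True              # propagated to all children
#     for child in dev.children:
#       _create_tree(child, force_create, create_fn)
#     if force_create:
#       create_fn(dev)                   # children are created before parent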

    
3904

    
3905
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
3906
  """Create a single block device on a given node.
3907

3908
  This will not recurse over children of the device, so they must be
3909
  created in advance.
3910

3911
  @param lu: the lu on whose behalf we execute
3912
  @param node: the node on which to create the device
3913
  @type instance: L{objects.Instance}
3914
  @param instance: the instance which owns the device
3915
  @type device: L{objects.Disk}
3916
  @param device: the device to create
3917
  @param info: the extra 'metadata' we should attach to the device
3918
      (this will be represented as a LVM tag)
3919
  @type force_open: boolean
3920
  @param force_open: this parameter will be passed to the
3921
      L{backend.BlockdevCreate} function where it specifies
3922
      whether we run on primary or not, and it affects both
3923
      the child assembly and the device's own Open() execution
3924

3925
  """
3926
  lu.cfg.SetDiskID(device, node)
3927
  result = lu.rpc.call_blockdev_create(node, device, device.size,
3928
                                       instance.name, force_open, info)
3929
  msg = result.RemoteFailMsg()
3930
  if msg:
3931
    raise errors.OpExecError("Can't create block device %s on"
3932
                             " node %s for instance %s: %s" %
3933
                             (device, node, instance.name, msg))
3934
  if device.physical_id is None:
3935
    device.physical_id = result.payload
3936

    
3937

    
3938
def _GenerateUniqueNames(lu, exts):
3939
  """Generate a suitable LV name.
3940

3941
  This will generate logical volume names for the given instance.
3942

3943
  """
3944
  results = []
3945
  for val in exts:
3946
    new_id = lu.cfg.GenerateUniqueID()
3947
    results.append("%s%s" % (new_id, val))
3948
  return results
3949

    
3950

    
3951
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
3952
                         p_minor, s_minor):
3953
  """Generate a drbd8 device complete with its children.
3954

3955
  """
3956
  port = lu.cfg.AllocatePort()
3957
  vgname = lu.cfg.GetVGName()
3958
  shared_secret = lu.cfg.GenerateDRBDSecret()
3959
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3960
                          logical_id=(vgname, names[0]))
3961
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3962
                          logical_id=(vgname, names[1]))
3963
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
3964
                          logical_id=(primary, secondary, port,
3965
                                      p_minor, s_minor,
3966
                                      shared_secret),
3967
                          children=[dev_data, dev_meta],
3968
                          iv_name=iv_name)
3969
  return drbd_dev
3970
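# Shape of the device tree returned above for a single DRBD8 disk
# (sizes in MiB):
#
#   LD_DRBD8 <size>, logical_id=(primary, secondary, port, p_minor, s_minor,
#                                shared_secret)
#     |- LD_LV names[0]  <size>   (data volume)
#     `- LD_LV names[1]  128      (DRBD metadata volume)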

    
3971

    
3972
def _GenerateDiskTemplate(lu, template_name,
3973
                          instance_name, primary_node,
3974
                          secondary_nodes, disk_info,
3975
                          file_storage_dir, file_driver,
3976
                          base_index):
3977
  """Generate the entire disk layout for a given template type.
3978

3979
  """
3980
  #TODO: compute space requirements
3981

    
3982
  vgname = lu.cfg.GetVGName()
3983
  disk_count = len(disk_info)
3984
  disks = []
3985
  if template_name == constants.DT_DISKLESS:
3986
    pass
3987
  elif template_name == constants.DT_PLAIN:
3988
    if len(secondary_nodes) != 0:
3989
      raise errors.ProgrammerError("Wrong template configuration")
3990

    
3991
    names = _GenerateUniqueNames(lu, [".disk%d" % i
3992
                                      for i in range(disk_count)])
3993
    for idx, disk in enumerate(disk_info):
3994
      disk_index = idx + base_index
3995
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
3996
                              logical_id=(vgname, names[idx]),
3997
                              iv_name="disk/%d" % disk_index,
3998
                              mode=disk["mode"])
3999
      disks.append(disk_dev)
4000
  elif template_name == constants.DT_DRBD8:
4001
    if len(secondary_nodes) != 1:
4002
      raise errors.ProgrammerError("Wrong template configuration")
4003
    remote_node = secondary_nodes[0]
4004
    minors = lu.cfg.AllocateDRBDMinor(
4005
      [primary_node, remote_node] * len(disk_info), instance_name)
4006

    
4007
    names = []
4008
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
4009
                                               for i in range(disk_count)]):
4010
      names.append(lv_prefix + "_data")
4011
      names.append(lv_prefix + "_meta")
4012
    for idx, disk in enumerate(disk_info):
4013
      disk_index = idx + base_index
4014
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
4015
                                      disk["size"], names[idx*2:idx*2+2],
4016
                                      "disk/%d" % disk_index,
4017
                                      minors[idx*2], minors[idx*2+1])
4018
      disk_dev.mode = disk["mode"]
4019
      disks.append(disk_dev)
4020
  elif template_name == constants.DT_FILE:
4021
    if len(secondary_nodes) != 0:
4022
      raise errors.ProgrammerError("Wrong template configuration")
4023

    
4024
    for idx, disk in enumerate(disk_info):
4025
      disk_index = idx + base_index
4026
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
4027
                              iv_name="disk/%d" % disk_index,
4028
                              logical_id=(file_driver,
4029
                                          "%s/disk%d" % (file_storage_dir,
4030
                                                         idx)),
4031
                              mode=disk["mode"])
4032
      disks.append(disk_dev)
4033
  else:
4034
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
4035
  return disks
4036
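# Illustrative input/output shapes for _GenerateDiskTemplate above: disk_info
# is a list of dicts such as [{"size": 1024, "mode": constants.DISK_RDWR}];
# with base_index=0 the generated disks get iv_name "disk/0", "disk/1", ...
# Each entry becomes a single LV for DT_PLAIN, the DRBD8 tree shown after
# _GenerateDRBD8Branch for DT_DRBD8, and an LD_FILE device below
# file_storage_dir for DT_FILE.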

    
4037

    
4038
def _GetInstanceInfoText(instance):
4039
  """Compute that text that should be added to the disk's metadata.
4040

4041
  """
4042
  return "originstname+%s" % instance.name
4043

    
4044

    
4045
def _CreateDisks(lu, instance):
4046
  """Create all disks for an instance.
4047

4048
  This abstracts away some work from AddInstance.
4049

4050
  @type lu: L{LogicalUnit}
4051
  @param lu: the logical unit on whose behalf we execute
4052
  @type instance: L{objects.Instance}
4053
  @param instance: the instance whose disks we should create
4054
  @raise errors.OpExecError: if any of the disks cannot be created
4056

4057
  """
4058
  info = _GetInstanceInfoText(instance)
4059
  pnode = instance.primary_node
4060

    
4061
  if instance.disk_template == constants.DT_FILE:
4062
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
4063
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
4064

    
4065
    if result.failed or not result.data:
4066
      raise errors.OpExecError("Could not connect to node '%s'" % pnode)
4067

    
4068
    if not result.data[0]:
4069
      raise errors.OpExecError("Failed to create directory '%s'" %
4070
                               file_storage_dir)
4071

    
4072
  # Note: this needs to be kept in sync with adding of disks in
4073
  # LUSetInstanceParams
4074
  for device in instance.disks:
4075
    logging.info("Creating volume %s for instance %s",
4076
                 device.iv_name, instance.name)
4077
    #HARDCODE
4078
    for node in instance.all_nodes:
4079
      f_create = node == pnode
4080
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
4081

    
4082

    
4083
def _RemoveDisks(lu, instance):
4084
  """Remove all disks for an instance.
4085

4086
  This abstracts away some work from `AddInstance()` and
4087
  `RemoveInstance()`. Note that in case some of the devices couldn't
4088
  be removed, the removal will continue with the other ones (compare
4089
  with `_CreateDisks()`).
4090

4091
  @type lu: L{LogicalUnit}
4092
  @param lu: the logical unit on whose behalf we execute
4093
  @type instance: L{objects.Instance}
4094
  @param instance: the instance whose disks we should remove
4095
  @rtype: boolean
4096
  @return: the success of the removal
4097

4098
  """
4099
  logging.info("Removing block devices for instance %s", instance.name)
4100

    
4101
  all_result = True
4102
  for device in instance.disks:
4103
    for node, disk in device.ComputeNodeTree(instance.primary_node):
4104
      lu.cfg.SetDiskID(disk, node)
4105
      msg = lu.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
4106
      if msg:
4107
        lu.LogWarning("Could not remove block device %s on node %s,"
4108
                      " continuing anyway: %s", device.iv_name, node, msg)
4109
        all_result = False
4110

    
4111
  if instance.disk_template == constants.DT_FILE:
4112
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
4113
    result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
4114
                                                 file_storage_dir)
4115
    if result.failed or not result.data:
4116
      logging.error("Could not remove directory '%s'", file_storage_dir)
4117
      all_result = False
4118

    
4119
  return all_result
4120

    
4121

    
4122
def _ComputeDiskSize(disk_template, disks):
4123
  """Compute disk size requirements in the volume group
4124

4125
  """
4126
  # Required free disk space as a function of the disk template and disk sizes
4127
  req_size_dict = {
4128
    constants.DT_DISKLESS: None,
4129
    constants.DT_PLAIN: sum(d["size"] for d in disks),
4130
    # 128 MB are added for drbd metadata for each disk
4131
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
4132
    constants.DT_FILE: None,
4133
  }
4134

    
4135
  if disk_template not in req_size_dict:
4136
    raise errors.ProgrammerError("Disk template '%s' size requirement"
4137
                                 " is unknown" %  disk_template)
4138

    
4139
  return req_size_dict[disk_template]
4140
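# Worked example (illustrative): for disks = [{"size": 1024}, {"size": 2048}]
#
#   DT_PLAIN    -> 1024 + 2048                 = 3072 MiB in the volume group
#   DT_DRBD8    -> (1024 + 128) + (2048 + 128) = 3328 MiB
#                  (128 MiB of DRBD metadata per disk)
#   DT_DISKLESS, DT_FILE -> None (no volume group space required)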

    
4141

    
4142
def _CheckHVParams(lu, nodenames, hvname, hvparams):
4143
  """Hypervisor parameter validation.
4144

4145
  This function abstracts the hypervisor parameter validation to be
4146
  used in both instance create and instance modify.
4147

4148
  @type lu: L{LogicalUnit}
4149
  @param lu: the logical unit for which we check
4150
  @type nodenames: list
4151
  @param nodenames: the list of nodes on which we should check
4152
  @type hvname: string
4153
  @param hvname: the name of the hypervisor we should use
4154
  @type hvparams: dict
4155
  @param hvparams: the parameters which we need to check
4156
  @raise errors.OpPrereqError: if the parameters are not valid
4157

4158
  """
4159
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
4160
                                                  hvname,
4161
                                                  hvparams)
4162
  for node in nodenames:
4163
    info = hvinfo[node]
4164
    if info.offline:
4165
      continue
4166
    msg = info.RemoteFailMsg()
4167
    if msg:
4168
      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
4169
                                 " %s" % msg)
4170

    
4171

    
4172
class LUCreateInstance(LogicalUnit):
4173
  """Create an instance.
4174

4175
  """
4176
  HPATH = "instance-add"
4177
  HTYPE = constants.HTYPE_INSTANCE
4178
  _OP_REQP = ["instance_name", "disks", "disk_template",
4179
              "mode", "start",
4180
              "wait_for_sync", "ip_check", "nics",
4181
              "hvparams", "beparams"]
4182
  REQ_BGL = False
4183

    
4184
  def _ExpandNode(self, node):
4185
    """Expands and checks one node name.
4186

4187
    """
4188
    node_full = self.cfg.ExpandNodeName(node)
4189
    if node_full is None:
4190
      raise errors.OpPrereqError("Unknown node %s" % node)
4191
    return node_full
4192

    
4193
  def ExpandNames(self):
4194
    """ExpandNames for CreateInstance.
4195

4196
    Figure out the right locks for instance creation.
4197

4198
    """
4199
    self.needed_locks = {}
4200

    
4201
    # set optional parameters to none if they don't exist
4202
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
4203
      if not hasattr(self.op, attr):
4204
        setattr(self.op, attr, None)
4205

    
4206
    # cheap checks, mostly valid constants given
4207

    
4208
    # verify creation mode
4209
    if self.op.mode not in (constants.INSTANCE_CREATE,
4210
                            constants.INSTANCE_IMPORT):
4211
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
4212
                                 self.op.mode)
4213

    
4214
    # disk template and mirror node verification
4215
    if self.op.disk_template not in constants.DISK_TEMPLATES:
4216
      raise errors.OpPrereqError("Invalid disk template name")
4217

    
4218
    if self.op.hypervisor is None:
4219
      self.op.hypervisor = self.cfg.GetHypervisorType()
4220

    
4221
    cluster = self.cfg.GetClusterInfo()
4222
    enabled_hvs = cluster.enabled_hypervisors
4223
    if self.op.hypervisor not in enabled_hvs:
4224
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
4225
                                 " cluster (%s)" % (self.op.hypervisor,
4226
                                  ",".join(enabled_hvs)))
4227

    
4228
    # check hypervisor parameter syntax (locally)
4229
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
4230
    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
4231
                                  self.op.hvparams)
4232
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
4233
    hv_type.CheckParameterSyntax(filled_hvp)
4234

    
4235
    # fill and remember the beparams dict
4236
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
4237
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
4238
                                    self.op.beparams)
4239
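    # Illustrative sketch of the fill semantics used for hvparams/beparams
    # above (an assumption about FillDict, not its actual implementation):
    # cluster-level defaults are overlaid with the values supplied in the
    # opcode, roughly:
    #
    #   def _fill(defaults, custom):
    #     filled = defaults.copy()
    #     filled.update(custom)
    #     return filled
    #
    #   # self.be_full ~ _fill(cluster.beparams[constants.BEGR_DEFAULT],
    #   #                      self.op.beparams)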

    
4240
    #### instance parameters check
4241

    
4242
    # instance name verification
4243
    hostname1 = utils.HostInfo(self.op.instance_name)
4244
    self.op.instance_name = instance_name = hostname1.name
4245

    
4246
    # this is just a preventive check, but someone might still add this
4247
    # instance in the meantime, and creation will fail at lock-add time
4248
    if instance_name in self.cfg.GetInstanceList():
4249
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
4250
                                 instance_name)
4251

    
4252
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
4253

    
4254
    # NIC buildup
    self.nics = []
    for nic in self.op.nics:
      # ip validity checks
      ip = nic.get("ip", None)
      if ip is None or ip.lower() == "none":
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        nic_ip = hostname1.ip
      else:
        if not utils.IsValidIP(ip):
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
                                     " like a valid IP" % ip)
        nic_ip = ip

      # MAC address verification
      mac = nic.get("mac", constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        if not utils.IsValidMac(mac.lower()):
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                                     mac)
      # bridge verification
      bridge = nic.get("bridge", None)
      if bridge is None:
        bridge = self.cfg.GetDefBridge()
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))

    # disk checks/pre-build
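    # each disk specification is a dict carrying a mandatory "size" (in MiB)
    # and an optional "mode" (read-write by default); they are normalised
    # here into self.disks for the later disk template generation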
    self.disks = []
    for disk in self.op.disks:
      mode = disk.get("mode", constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                   mode)
      size = disk.get("size", None)
      if size is None:
        raise errors.OpPrereqError("Missing disk size")
      try:
        size = int(size)
      except ValueError:
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
      self.disks.append({"size": size, "mode": mode})

    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from an absolute"
                                     " path requires a source node option.")
      else:
        self.op.src_node = src_node = self._ExpandNode(src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            os.path.join(constants.EXPORT_DIR, src_path)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    nics = [n.ToDict() for n in self.nics]
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_ALLOC,
                     name=self.op.instance_name,
                     disk_template=self.op.disk_template,
                     tags=[],
                     os=self.op.os_type,
                     vcpus=self.be_full[constants.BE_VCPUS],
                     mem_size=self.be_full[constants.BE_MEMORY],
                     disks=self.disks,
                     nics=nics,
                     hypervisor=self.op.hypervisor,
                     )

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.pnode = ial.nodes[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 ", ".join(ial.nodes))
    if ial.required_nodes == 2:
      self.op.snode = ial.nodes[1]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": ",".join(str(d["size"]) for d in self.disks),
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGES"] = self.src_images

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=[(n.ip, n.bridge, n.mac) for n in self.nics],
    ))

    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl


  def CheckPrereq(self):
    """Check prerequisites.

    """
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")


    if self.op.mode == constants.INSTANCE_IMPORT:
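      # for imports without an explicit source node, the export lists of
      # all locked nodes are scanned below for an export matching the
      # (relative) source path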
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_node is None:
        exp_list = self.rpc.call_export_list(
          self.acquired_locks[locking.LEVEL_NODE])
        found = False
        for node in exp_list:
          if not exp_list[node].failed and src_path in exp_list[node].data:
            found = True
            self.op.src_node = src_node = node
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
                                                       src_path)
            break
        if not found:
          raise errors.OpPrereqError("No export found for relative path %s" %
                                      src_path)

      _CheckNodeOnline(self, src_node)
      result = self.rpc.call_export_info(src_node, src_path)
      result.Raise()
      if not result.data:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      export_info = result.data
      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # Check that the new instance doesn't have less disks than the export
      instance_disks = len(self.disks)
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (instance_disks, export_disks))

      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = os.path.join(src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      old_name = export_info.get(constants.INISECT_INS, 'name')
      # FIXME: int() here could throw a ValueError on broken exports
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
      if self.op.instance_name == old_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
            nic_mac_ini = 'nic%d_mac' % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name)

    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      _CheckNodeOnline(self, self.op.snode)
      _CheckNodeNotDrained(self, self.op.snode)
      self.secondaries.append(self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.disks)

    # Check lv size requirements
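    # req_size is the per-node space needed in the volume group for the
    # chosen disk template; it is None for templates that do not use LVM
    # (e.g. file-based or diskless), in which case the check is skipped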
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo[node]
        info.Raise()
        info = info.data
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
    result.Raise()
    if not isinstance(result.data, objects.OS):
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # bridge check on primary node
    bridges = [n.bridge for n in self.nics]
    result = self.rpc.call_bridges_exist(self.pnode.name, bridges)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One of the target bridges '%s' does not"
                                 " exist on destination node '%s'" %
                                 (",".join(bridges), pnode.name))

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC()

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    ##if self.op.vnc_bind_address is None:
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance))


    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries,
                                  self.disks,
                                  file_storage_dir,
                                  self.op.file_driver,
                                  0)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            admin_up=False,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            )
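    # the instance object is created with admin_up=False; it is only marked
    # up (and actually started) at the end of Exec if self.op.start is set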

    feedback_fn("* creating instance disks...")
    try:
      _CreateDisks(self, iobj)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, iobj)
      finally:
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Unlock all the nodes
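    # (for an import we keep the source node locked, as it is still needed
    # by the OS import step further down)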
    if self.op.mode == constants.INSTANCE_IMPORT:
      nodes_keep = [self.op.src_node]
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
                       if node != self.op.src_node]
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
    else:
      self.context.glm.release(locking.LEVEL_NODE)
      del self.acquired_locks[locking.LEVEL_NODE]

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        result = self.rpc.call_instance_os_add(pnode_name, iobj)
        msg = result.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Could not add os for instance %s"
                                   " on node %s: %s" %
                                   (instance, pnode_name, msg))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_images = self.src_images
        cluster_name = self.cfg.GetClusterName()
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
                                                         src_node, src_images,
                                                         cluster_name)
        import_result.Raise()
        for idx, result in enumerate(import_result.data):
          if not result:
            self.LogWarning("Could not import the image %s for instance"
                            " %s, disk %d, on node %s" %
                            (src_images[idx], instance, idx, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      iobj.admin_up = True
      self.cfg.Update(iobj)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, iobj, None)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not start instance: %s" % msg)


class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    instance = self.instance
    node = instance.primary_node

    node_insts = self.rpc.call_instance_list([node],
                                             [instance.hypervisor])[node]
    node_insts.Raise()

    if instance.name not in node_insts.data:
      raise errors.OpExecError("Instance %s is not running." % instance.name)

    logging.debug("Connecting to console of %s on %s", instance.name, node)

    hyper = hypervisor.GetHypervisor(instance.hypervisor)
    cluster = self.cfg.GetClusterInfo()
    # beparams and hvparams are passed separately, to avoid editing the
    # instance and then saving the defaults in the instance itself.
    hvparams = cluster.FillHV(instance)
    beparams = cluster.FillBE(instance)
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)

    # build ssh cmdline
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)


class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mode", "disks"]
  REQ_BGL = False

  def CheckArguments(self):
    if not hasattr(self.op, "remote_node"):
      self.op.remote_node = None
    if not hasattr(self.op, "iallocator"):
      self.op.iallocator = None

    # check for valid parameter combination
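    # - REPLACE_DISK_CHG needs exactly one of remote_node/iallocator
    # - the other modes accept neither, since no new node is involved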
    cnt = [self.op.remote_node, self.op.iallocator].count(None)
    if self.op.mode == constants.REPLACE_DISK_CHG:
      if cnt == 2:
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given")
      elif cnt == 0:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
    else: # not replacing the secondary
      if cnt != 2:
        raise errors.OpPrereqError("The iallocator and new node options can"
                                   " be used only when changing the"
                                   " secondary node")

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if self.op.iallocator is not None:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    elif self.op.remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.op.remote_node = remote_node
      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # If we're not already locking all nodes in the set we have to declare the
    # instance's primary/secondary nodes.
    if (level == locking.LEVEL_NODE and
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
      self._LockInstancesNodes()

  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    """
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    self.LogInfo("Selected new secondary for the instance: %s",
                 self.op.remote_node)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    if self.op.remote_node is not None:
      nl.append(self.op.remote_node)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                 " instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    if self.op.iallocator is not None:
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance.")

    if self.op.mode == constants.REPLACE_DISK_PRI:
      n1 = self.tgt_node = instance.primary_node
      n2 = self.oth_node = self.sec_node
    elif self.op.mode == constants.REPLACE_DISK_SEC:
      n1 = self.tgt_node = self.sec_node
      n2 = self.oth_node = instance.primary_node
    elif self.op.mode == constants.REPLACE_DISK_CHG:
      n1 = self.new_node = remote_node
      n2 = self.oth_node = instance.primary_node
      self.tgt_node = self.sec_node
      _CheckNodeNotDrained(self, remote_node)
    else:
      raise errors.ProgrammerError("Unhandled disk replace mode")

    _CheckNodeOnline(self, n1)
    _CheckNodeOnline(self, n2)

    if not self.op.disks:
      self.op.disks = range(len(instance.disks))

    for disk_idx in self.op.disks:
      instance.FindDisk(disk_idx)

  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for drbd8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaced.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
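    # both nodes must see the volume group and the DRBD device of every
    # disk we are about to touch, otherwise we abort before changing anything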
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results[node]
      if res.failed or not res.data or my_vg not in res.data:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking disk/%d on %s" % (idx, node))
        cfg.SetDiskID(dev, node)
        result = self.rpc.call_blockdev_find(node, dev)
        msg = result.RemoteFailMsg()
        if not msg and not result.payload:
          msg = "disk not found"
        if msg:
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                   (idx, node, msg))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, oth_node))
      if not _CheckDiskConsistency(self, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".disk%d_%s" % (idx, suf)
                  for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(self, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        _CreateBlockDev(self, tgt_node, instance, new_lv, True,
                        _GetInstanceInfoText(instance), False)

    # Step: for each lv, detach+rename*2+attach
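    # the old LVs are renamed to <name>_replaced-<timestamp>, the new LVs
    # take over the old names and are then re-attached to the DRBD device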
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        result = self.rpc.call_blockdev_find(tgt_node, to_ren)
        if not result.RemoteFailMsg() and result.payload:
          # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
      if result.failed or not result.data:
        for new_lv in new_lvs:
          msg = self.rpc.call_blockdev_remove(tgt_node, new_lv).RemoteFailMsg()
          if msg:
            warning("Can't rollback device %s: %s", dev, msg,
                    hint="cleanup manually the unused logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      result = self.rpc.call_blockdev_find(instance.primary_node, dev)
      msg = result.RemoteFailMsg()
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
                                 (name, msg))
      if result.payload[5]:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        msg = self.rpc.call_blockdev_remove(tgt_node, lv).RemoteFailMsg()
        if msg:
          warning("Can't remove old LV: %s" % msg,
                  hint="manually remove unused LVs")
          continue

  def _ExecD8Secondary(self, feedback_fn):
    """Replace the secondary node for drbd8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    # start of work
    cfg = self.cfg
    old_node = self.tgt_node
    new_node = self.new_node
    pri_node = instance.primary_node
    nodes_ip = {
      old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
      new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
      pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
      }

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([pri_node, new_node])
    for node in pri_node, new_node:
      res = results[node]
      if res.failed or not res.data or my_vg not in res.data:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d on %s" % (idx, pri_node))
      cfg.SetDiskID(dev, pri_node)
      result = self.rpc.call_blockdev_find(pri_node, dev)
      msg = result.RemoteFailMsg()
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                 (idx, pri_node, msg))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, pri_node))
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
                                 " unsafe to replace the secondary" %
                                 pri_node)

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      info("adding new local storage on %s for disk/%d" %
           (new_node, idx))
      # we pass force_create=True to force LVM creation
      for new_lv in dev.children:
        _CreateBlockDev(self, new_node, instance, new_lv, True,
                        _GetInstanceInfoText(instance), False)

    # Step 4: drbd minors and drbd setups changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
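    # (a DRBD minor is the per-node device number; one is reserved on the
    # new secondary for every instance disk before building the new ids)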
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
                                   instance.name)
    logging.debug("Allocated minors %s" % (minors,))
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
      size = dev.size
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the latter activation in step 4
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if pri_node == o_node1:
        p_minor = o_minor1
      else:
        p_minor = o_minor2

      new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
      new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)

      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children)
      try:
        _CreateSingleBlockDev(self, new_node, instance, new_drbd,
                              _GetInstanceInfoText(instance), False)
      except errors.BlockDeviceError:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    for idx, dev in enumerate(instance.disks):
      # we have new devices, shutdown the drbd on the old secondary
      info("shutting down drbd for disk/%d on old node" % idx)
      cfg.SetDiskID(dev, old_node)
      msg = self.rpc.call_blockdev_shutdown(old_node, dev).RemoteFailMsg()
      if msg:
        warning("Failed to shutdown drbd for disk/%d on old node: %s" %
                (idx, msg),
                hint="Please cleanup this device manually as soon as possible")

    info("detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
                                               instance.disks)[pri_node]

    msg = result.RemoteFailMsg()
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(instance.name)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    info("updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      cfg.SetDiskID(dev, pri_node)
    cfg.Update(instance)

    # and now perform the drbd attach
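    # this runs on both the primary and the new secondary, so that both
    # ends of each DRBD pair pick up the network configuration and connect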
    info("attaching primary drbds to new secondary (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
                                           instance.disks, instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.RemoteFailMsg()
      if msg:
        warning("can't attach drbd disks on node %s: %s", to_node, msg,
                hint="please do a gnt-instance info to see the"
                " status of disks")

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
      cfg.SetDiskID(dev, pri_node)
      result = self.rpc.call_blockdev_find(pri_node, dev)
      msg = result.RemoteFailMsg()
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
                                 (idx, msg))
      if result.payload[5]:
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)

    self.proc.LogStep(6, steps_total, "removing old storage")
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
      info("remove logical volumes for disk/%d" % idx)
      for lv in old_lvs:
        cfg.SetDiskID(lv, old_node)
        msg = self.rpc.call_blockdev_remove(old_node, lv).RemoteFailMsg()
        if msg:
          warning("Can't remove LV on old secondary: %s", msg,
                  hint="Cleanup stale volumes by hand")

  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    instance = self.instance

    # Activate the instance disks if we're replacing them on a down instance
    if not instance.admin_up:
      _StartInstanceDisks(self, instance, True)

    if self.op.mode == constants.REPLACE_DISK_CHG:
      fn = self._ExecD8Secondary
    else:
      fn = self._ExecD8DiskOnly

    ret = fn(feedback_fn)

    # Deactivate the instance disks if we're replacing them on a down instance
    if not instance.admin_up:
      _SafeShutdownInstanceDisks(self, instance)

    return ret


class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)


    self.instance = instance

    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    self.disk = instance.FindDisk(self.op.disk)
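    # the free space check below is done on every node of the instance,
    # since for mirrored templates the grow happens on all of them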

    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       instance.hypervisor)
    for node in nodenames:
      info = nodeinfo[node]
      if info.failed or not info.data:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      vg_free = info.data.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > vg_free:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, vg_free, self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = self.disk
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Grow request failed to node %s: %s" %
                                 (node, msg))
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance)
      if disk_abort:
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")


class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    """
5489
    static = self.op.static
5490
    if not static:
5491
      self.cfg.SetDiskID(dev, instance.primary_node)
5492
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
5493
      msg = dev_pstatus.RemoteFailMsg()
5494
      if msg:
5495
        raise errors.OpExecError("Can't compute disk status for %s: %s" %
5496
                                 (instance.name, msg))
5497
      dev_pstatus = dev_pstatus.payload
5498
    else:
5499
      dev_pstatus = None
5500

    
5501
    if dev.dev_type in constants.LDS_DRBD:
5502
      # we change the snode then (otherwise we use the one passed in)
5503
      if dev.logical_id[0] == instance.primary_node:
5504
        snode = dev.logical_id[1]
5505
      else:
5506
        snode = dev.logical_id[0]
5507

    
5508
    if snode and not static:
5509
      self.cfg.SetDiskID(dev, snode)
5510
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
5511
      msg = dev_sstatus.RemoteFailMsg()
5512
      if msg:
5513
        raise errors.OpExecError("Can't compute disk status for %s: %s" %
5514
                                 (instance.name, msg))
5515
      dev_sstatus = dev_sstatus.payload
5516
    else:
5517
      dev_sstatus = None
5518

    
5519
    if dev.children:
5520
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
5521
                      for child in dev.children]
5522
    else:
5523
      dev_children = []
5524

    
5525
    data = {
5526
      "iv_name": dev.iv_name,
5527
      "dev_type": dev.dev_type,
5528
      "logical_id": dev.logical_id,
5529
      "physical_id": dev.physical_id,
5530
      "pstatus": dev_pstatus,
5531
      "sstatus": dev_sstatus,
5532
      "children": dev_children,
5533
      "mode": dev.mode,
5534
      }
5535

    
5536
    return data
5537

    
5538
  def Exec(self, feedback_fn):
5539
    """Gather and return data"""
5540
    result = {}
5541

    
5542
    cluster = self.cfg.GetClusterInfo()
5543

    
5544
    for instance in self.wanted_instances:
5545
      if not self.op.static:
5546
        remote_info = self.rpc.call_instance_info(instance.primary_node,
5547
                                                  instance.name,
5548
                                                  instance.hypervisor)
5549
        remote_info.Raise()
5550
        remote_info = remote_info.data
5551
        if remote_info and "state" in remote_info:
5552
          remote_state = "up"
5553
        else:
5554
          remote_state = "down"
5555
      else:
5556
        remote_state = None
5557
      if instance.admin_up:
5558
        config_state = "up"
5559
      else:
5560
        config_state = "down"
5561

    
5562
      disks = [self._ComputeDiskStatus(instance, None, device)
5563
               for device in instance.disks]
5564

    
5565
      idict = {
5566
        "name": instance.name,
5567
        "config_state": config_state,
5568
        "run_state": remote_state,
5569
        "pnode": instance.primary_node,
5570
        "snodes": instance.secondary_nodes,
5571
        "os": instance.os,
5572
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
5573
        "disks": disks,
5574
        "hypervisor": instance.hypervisor,
5575
        "network_port": instance.network_port,
5576
        "hv_instance": instance.hvparams,
5577
        "hv_actual": cluster.FillHV(instance),
5578
        "be_instance": instance.beparams,
5579
        "be_actual": cluster.FillBE(instance),
5580
        }
5581

    
5582
      result[instance.name] = idict
5583

    
5584
    return result
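  # Illustrative sketch (not in the original source): the mapping returned by
  # Exec has one entry per queried instance, roughly of the form:
  #   result["inst1.example.com"] = {
  #     "name": "inst1.example.com", "config_state": "up", "run_state": "up",
  #     "pnode": "node1.example.com", "snodes": [...], "os": "debian-etch",
  #     "nics": [(mac, ip, bridge), ...], "disks": [...],
  #     "hv_instance": {...}, "hv_actual": {...},
  #     "be_instance": {...}, "be_actual": {...}, ...}
  # Instance, node and OS names above are invented for the example.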


class LUSetInstanceParams(LogicalUnit):
  """Modifies an instance's parameters.
5589

5590
  """
5591
  HPATH = "instance-modify"
5592
  HTYPE = constants.HTYPE_INSTANCE
5593
  _OP_REQP = ["instance_name"]
5594
  REQ_BGL = False
5595

    
5596
  def CheckArguments(self):
5597
    if not hasattr(self.op, 'nics'):
5598
      self.op.nics = []
5599
    if not hasattr(self.op, 'disks'):
5600
      self.op.disks = []
5601
    if not hasattr(self.op, 'beparams'):
5602
      self.op.beparams = {}
5603
    if not hasattr(self.op, 'hvparams'):
5604
      self.op.hvparams = {}
5605
    self.op.force = getattr(self.op, "force", False)
5606
    if not (self.op.nics or self.op.disks or
5607
            self.op.hvparams or self.op.beparams):
5608
      raise errors.OpPrereqError("No changes submitted")
5609

    
5610
    # Disk validation
5611
    disk_addremove = 0
5612
    for disk_op, disk_dict in self.op.disks:
5613
      if disk_op == constants.DDM_REMOVE:
5614
        disk_addremove += 1
5615
        continue
5616
      elif disk_op == constants.DDM_ADD:
5617
        disk_addremove += 1
5618
      else:
5619
        if not isinstance(disk_op, int):
5620
          raise errors.OpPrereqError("Invalid disk index")
5621
      if disk_op == constants.DDM_ADD:
5622
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
5623
        if mode not in constants.DISK_ACCESS_SET:
5624
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
5625
        size = disk_dict.get('size', None)
5626
        if size is None:
5627
          raise errors.OpPrereqError("Required disk parameter size missing")
5628
        try:
5629
          size = int(size)
5630
        except ValueError, err:
5631
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
5632
                                     str(err))
5633
        disk_dict['size'] = size
5634
      else:
5635
        # modification of disk
5636
        if 'size' in disk_dict:
5637
          raise errors.OpPrereqError("Disk size change not possible, use"
5638
                                     " grow-disk")
5639

    
5640
    if disk_addremove > 1:
5641
      raise errors.OpPrereqError("Only one disk add or remove operation"
5642
                                 " supported at a time")
5643

    
5644
    # NIC validation
5645
    nic_addremove = 0
5646
    for nic_op, nic_dict in self.op.nics:
5647
      if nic_op == constants.DDM_REMOVE:
5648
        nic_addremove += 1
5649
        continue
5650
      elif nic_op == constants.DDM_ADD:
5651
        nic_addremove += 1
5652
      else:
5653
        if not isinstance(nic_op, int):
5654
          raise errors.OpPrereqError("Invalid nic index")
5655

    
5656
      # nic_dict should be a dict
5657
      nic_ip = nic_dict.get('ip', None)
5658
      if nic_ip is not None:
5659
        if nic_ip.lower() == "none":
5660
          nic_dict['ip'] = None
5661
        else:
5662
          if not utils.IsValidIP(nic_ip):
5663
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
5664
      # we can only check None bridges and assign the default one
5665
      nic_bridge = nic_dict.get('bridge', None)
5666
      if nic_bridge is None:
5667
        nic_dict['bridge'] = self.cfg.GetDefBridge()
5668
      # but we can validate MACs
5669
      nic_mac = nic_dict.get('mac', None)
5670
      if nic_mac is not None:
5671
        if self.cfg.IsMacInUse(nic_mac):
5672
          raise errors.OpPrereqError("MAC address %s already in use"
5673
                                     " in cluster" % nic_mac)
5674
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5675
          if not utils.IsValidMac(nic_mac):
5676
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
5677
    if nic_addremove > 1:
5678
      raise errors.OpPrereqError("Only one NIC add or remove operation"
5679
                                 " supported at a time")
5680

    
5681
  def ExpandNames(self):
5682
    self._ExpandAndLockInstance()
5683
    self.needed_locks[locking.LEVEL_NODE] = []
5684
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
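  # Illustrative note (not in the original source): the self.op.disks and
  # self.op.nics lists validated in CheckArguments above are lists of
  # (op, params) pairs, where op is constants.DDM_ADD, constants.DDM_REMOVE
  # or the integer index of an existing device, for example something like:
  #   op.disks = [(constants.DDM_ADD, {"size": 1024, "mode": "rw"})]
  #   op.nics  = [(0, {"bridge": "xen-br1"})]
  # The concrete sizes, modes and bridge names are made up for the example.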
5685

    
5686
  def DeclareLocks(self, level):
5687
    if level == locking.LEVEL_NODE:
5688
      self._LockInstancesNodes()
5689

    
5690
  def BuildHooksEnv(self):
5691
    """Build hooks env.
5692

5693
    This runs on the master, primary and secondaries.
5694

5695
    """
5696
    args = dict()
5697
    if constants.BE_MEMORY in self.be_new:
5698
      args['memory'] = self.be_new[constants.BE_MEMORY]
5699
    if constants.BE_VCPUS in self.be_new:
5700
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
5701
    # FIXME: readd disk/nic changes
5702
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
5703
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5704
    return env, nl, nl
5705

    
5706
  def CheckPrereq(self):
5707
    """Check prerequisites.
5708

5709
    This only checks the instance list against the existing names.
5710

5711
    """
5712
    force = self.force = self.op.force
5713

    
5714
    # checking the new params on the primary/secondary nodes
5715

    
5716
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5717
    assert self.instance is not None, \
5718
      "Cannot retrieve locked instance %s" % self.op.instance_name
5719
    pnode = instance.primary_node
5720
    nodelist = list(instance.all_nodes)
5721

    
5722
    # hvparams processing
5723
    if self.op.hvparams:
5724
      i_hvdict = copy.deepcopy(instance.hvparams)
5725
      for key, val in self.op.hvparams.iteritems():
5726
        if val == constants.VALUE_DEFAULT:
5727
          try:
5728
            del i_hvdict[key]
5729
          except KeyError:
5730
            pass
5731
        else:
5732
          i_hvdict[key] = val
5733
      cluster = self.cfg.GetClusterInfo()
5734
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
5735
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
5736
                                i_hvdict)
5737
      # local check
5738
      hypervisor.GetHypervisor(
5739
        instance.hypervisor).CheckParameterSyntax(hv_new)
5740
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
5741
      self.hv_new = hv_new # the new actual values
5742
      self.hv_inst = i_hvdict # the new dict (without defaults)
5743
    else:
5744
      self.hv_new = self.hv_inst = {}
5745

    
5746
    # beparams processing
5747
    if self.op.beparams:
5748
      i_bedict = copy.deepcopy(instance.beparams)
5749
      for key, val in self.op.beparams.iteritems():
5750
        if val == constants.VALUE_DEFAULT:
5751
          try:
5752
            del i_bedict[key]
5753
          except KeyError:
5754
            pass
5755
        else:
5756
          i_bedict[key] = val
5757
      cluster = self.cfg.GetClusterInfo()
5758
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
5759
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
5760
                                i_bedict)
5761
      self.be_new = be_new # the new actual values
5762
      self.be_inst = i_bedict # the new dict (without defaults)
5763
    else:
5764
      self.be_new = self.be_inst = {}
5765

    
5766
    self.warn = []
5767

    
5768
    if constants.BE_MEMORY in self.op.beparams and not self.force:
5769
      mem_check_list = [pnode]
5770
      if be_new[constants.BE_AUTO_BALANCE]:
5771
        # either we changed auto_balance to yes or it was from before
5772
        mem_check_list.extend(instance.secondary_nodes)
5773
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
5774
                                                  instance.hypervisor)
5775
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
5776
                                         instance.hypervisor)
5777
      if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict):
5778
        # Assume the primary node is unreachable and go ahead
5779
        self.warn.append("Can't get info from primary node %s" % pnode)
5780
      else:
5781
        if not instance_info.failed and instance_info.data:
5782
          current_mem = instance_info.data['memory']
5783
        else:
5784
          # Assume instance not running
5785
          # (there is a slight race condition here, but it's not very probable,
5786
          # and we have no other way to check)
5787
          current_mem = 0
5788
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
5789
                    nodeinfo[pnode].data['memory_free'])
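        # Rough illustration with invented numbers (descriptive comment, not
        # in the original source): if the new memory size is 2048 MB, the
        # instance currently uses 512 MB and the node reports 1024 MB free,
        # then miss_mem = 2048 - 512 - 1024 = 512 > 0 and the change is
        # refused below; the whole check is skipped when force is requested.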
5790
        if miss_mem > 0:
5791
          raise errors.OpPrereqError("This change will prevent the instance"
5792
                                     " from starting, due to %d MB of memory"
5793
                                     " missing on its primary node" % miss_mem)
5794

    
5795
      if be_new[constants.BE_AUTO_BALANCE]:
5796
        for node, nres in nodeinfo.iteritems():
5797
          if node not in instance.secondary_nodes:
5798
            continue
5799
          if nres.failed or not isinstance(nres.data, dict):
5800
            self.warn.append("Can't get info from secondary node %s" % node)
5801
          elif be_new[constants.BE_MEMORY] > nres.data['memory_free']:
5802
            self.warn.append("Not enough memory to failover instance to"
5803
                             " secondary node %s" % node)
5804

    
5805
    # NIC processing
5806
    for nic_op, nic_dict in self.op.nics:
5807
      if nic_op == constants.DDM_REMOVE:
5808
        if not instance.nics:
5809
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
5810
        continue
5811
      if nic_op != constants.DDM_ADD:
5812
        # an existing nic
5813
        if nic_op < 0 or nic_op >= len(instance.nics):
5814
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
5815
                                     " are 0 to %d" %
5816
                                     (nic_op, len(instance.nics)))
5817
      nic_bridge = nic_dict.get('bridge', None)
5818
      if nic_bridge is not None:
5819
        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
5820
          msg = ("Bridge '%s' doesn't exist on one of"
5821
                 " the instance nodes" % nic_bridge)
5822
          if self.force:
5823
            self.warn.append(msg)
5824
          else:
5825
            raise errors.OpPrereqError(msg)
5826

    
5827
    # DISK processing
5828
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
5829
      raise errors.OpPrereqError("Disk operations not supported for"
5830
                                 " diskless instances")
5831
    for disk_op, disk_dict in self.op.disks:
5832
      if disk_op == constants.DDM_REMOVE:
5833
        if len(instance.disks) == 1:
5834
          raise errors.OpPrereqError("Cannot remove the last disk of"
5835
                                     " an instance")
5836
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
5837
        ins_l = ins_l[pnode]
5838
        if ins_l.failed or not isinstance(ins_l.data, list):
5839
          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
5840
        if instance.name in ins_l.data:
5841
          raise errors.OpPrereqError("Instance is running, can't remove"
5842
                                     " disks.")
5843

    
5844
      if (disk_op == constants.DDM_ADD and
          len(instance.disks) >= constants.MAX_DISKS):
5846
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
5847
                                   " add more" % constants.MAX_DISKS)
5848
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
5849
        # an existing disk
5850
        if disk_op < 0 or disk_op >= len(instance.disks):
5851
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
5852
                                     " are 0 to %d" %
5853
                                     (disk_op, len(instance.disks)))
5854

    
5855
    return
5856

    
5857
  def Exec(self, feedback_fn):
5858
    """Modifies an instance.
5859

5860
    All parameters take effect only at the next restart of the instance.
5861

5862
    """
5863
    # Process here the warnings from CheckPrereq, as we don't have a
5864
    # feedback_fn there.
5865
    for warn in self.warn:
5866
      feedback_fn("WARNING: %s" % warn)
5867

    
5868
    result = []
5869
    instance = self.instance
5870
    # disk changes
5871
    for disk_op, disk_dict in self.op.disks:
5872
      if disk_op == constants.DDM_REMOVE:
5873
        # remove the last disk
5874
        device = instance.disks.pop()
5875
        device_idx = len(instance.disks)
5876
        for node, disk in device.ComputeNodeTree(instance.primary_node):
5877
          self.cfg.SetDiskID(disk, node)
5878
          msg = self.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
5879
          if msg:
5880
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
5881
                            " continuing anyway", device_idx, node, msg)
5882
        result.append(("disk/%d" % device_idx, "remove"))
5883
      elif disk_op == constants.DDM_ADD:
5884
        # add a new disk
5885
        if instance.disk_template == constants.DT_FILE:
5886
          file_driver, file_path = instance.disks[0].logical_id
5887
          file_path = os.path.dirname(file_path)
5888
        else:
5889
          file_driver = file_path = None
5890
        disk_idx_base = len(instance.disks)
5891
        new_disk = _GenerateDiskTemplate(self,
5892
                                         instance.disk_template,
5893
                                         instance.name, instance.primary_node,
5894
                                         instance.secondary_nodes,
5895
                                         [disk_dict],
5896
                                         file_path,
5897
                                         file_driver,
5898
                                         disk_idx_base)[0]
5899
        instance.disks.append(new_disk)
5900
        info = _GetInstanceInfoText(instance)
5901

    
5902
        logging.info("Creating volume %s for instance %s",
5903
                     new_disk.iv_name, instance.name)
5904
        # Note: this needs to be kept in sync with _CreateDisks
5905
        #HARDCODE
5906
        for node in instance.all_nodes:
5907
          f_create = node == instance.primary_node
5908
          try:
5909
            _CreateBlockDev(self, node, instance, new_disk,
5910
                            f_create, info, f_create)
5911
          except errors.OpExecError, err:
5912
            self.LogWarning("Failed to create volume %s (%s) on"
5913
                            " node %s: %s",
5914
                            new_disk.iv_name, new_disk, node, err)
5915
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
5916
                       (new_disk.size, new_disk.mode)))
5917
      else:
5918
        # change a given disk
5919
        instance.disks[disk_op].mode = disk_dict['mode']
5920
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
5921
    # NIC changes
5922
    for nic_op, nic_dict in self.op.nics:
5923
      if nic_op == constants.DDM_REMOVE:
5924
        # remove the last nic
5925
        del instance.nics[-1]
5926
        result.append(("nic.%d" % len(instance.nics), "remove"))
5927
      elif nic_op == constants.DDM_ADD:
5928
        # add a new nic
5929
        if 'mac' not in nic_dict:
5930
          mac = constants.VALUE_GENERATE
5931
        else:
5932
          mac = nic_dict['mac']
5933
        if mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5934
          mac = self.cfg.GenerateMAC()
5935
        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
5936
                              bridge=nic_dict.get('bridge', None))
5937
        instance.nics.append(new_nic)
5938
        result.append(("nic.%d" % (len(instance.nics) - 1),
5939
                       "add:mac=%s,ip=%s,bridge=%s" %
5940
                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
5941
      else:
5942
        # change a given nic
5943
        for key in 'mac', 'ip', 'bridge':
5944
          if key in nic_dict:
5945
            setattr(instance.nics[nic_op], key, nic_dict[key])
5946
            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))
5947

    
5948
    # hvparams changes
5949
    if self.op.hvparams:
5950
      instance.hvparams = self.hv_inst
5951
      for key, val in self.op.hvparams.iteritems():
5952
        result.append(("hv/%s" % key, val))
5953

    
5954
    # beparams changes
5955
    if self.op.beparams:
5956
      instance.beparams = self.be_inst
5957
      for key, val in self.op.beparams.iteritems():
5958
        result.append(("be/%s" % key, val))
5959

    
5960
    self.cfg.Update(instance)
5961

    
5962
    return result
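  # Illustrative note (not in the original source): the list returned above
  # pairs each changed parameter with its new value and is reported back to
  # the caller, e.g. roughly:
  #   [("disk/1", "add:size=1024,mode=rw"), ("nic.bridge/0", "xen-br1"),
  #    ("be/memory", 512)]
  # The concrete entries are made up for the example.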


class LUQueryExports(NoHooksLU):
5966
  """Query the exports list
5967

5968
  """
5969
  _OP_REQP = ['nodes']
5970
  REQ_BGL = False
5971

    
5972
  def ExpandNames(self):
5973
    self.needed_locks = {}
5974
    self.share_locks[locking.LEVEL_NODE] = 1
5975
    if not self.op.nodes:
5976
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5977
    else:
5978
      self.needed_locks[locking.LEVEL_NODE] = \
5979
        _GetWantedNodes(self, self.op.nodes)
5980

    
5981
  def CheckPrereq(self):
5982
    """Check prerequisites.
5983

5984
    """
5985
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
5986

    
5987
  def Exec(self, feedback_fn):
5988
    """Compute the list of all the exported system images.
5989

5990
    @rtype: dict
5991
    @return: a dictionary with the structure node->(export-list)
5992
        where export-list is a list of the instances exported on
5993
        that node.
5994

5995
    """
5996
    rpcresult = self.rpc.call_export_list(self.nodes)
5997
    result = {}
5998
    for node in rpcresult:
5999
      if rpcresult[node].failed:
6000
        result[node] = False
6001
      else:
6002
        result[node] = rpcresult[node].data
6003

    
6004
    return result
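  # Illustrative sketch (not in the original source): the returned dict maps
  # each queried node name to the list of exports found on it, or to False
  # when the node could not be contacted, e.g. roughly:
  #   {"node1.example.com": ["inst1.example.com", "inst2.example.com"],
  #    "node2.example.com": False}
  # Node and instance names are invented for the example.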


class LUExportInstance(LogicalUnit):
6008
  """Export an instance to an image in the cluster.
6009

6010
  """
6011
  HPATH = "instance-export"
6012
  HTYPE = constants.HTYPE_INSTANCE
6013
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
6014
  REQ_BGL = False
6015

    
6016
  def ExpandNames(self):
6017
    self._ExpandAndLockInstance()
6018
    # FIXME: lock only instance primary and destination node
6019
    #
6020
    # Sad but true, for now we have to lock all nodes, as we don't know where
    # the previous export might be, and in this LU we search for it and
    # remove it from its current node. In the future we could fix this by:
    #  - making a tasklet to search (share-lock all), then create the new one,
    #    then one to remove, after
    #  - removing the removal operation altogether
6026
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6027

    
6028
  def DeclareLocks(self, level):
6029
    """Last minute lock declaration."""
6030
    # All nodes are locked anyway, so nothing to do here.
6031

    
6032
  def BuildHooksEnv(self):
6033
    """Build hooks env.
6034

6035
    This will run on the master, primary node and target node.
6036

6037
    """
6038
    env = {
6039
      "EXPORT_NODE": self.op.target_node,
6040
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
6041
      }
6042
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6043
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
6044
          self.op.target_node]
6045
    return env, nl, nl
6046

    
6047
  def CheckPrereq(self):
6048
    """Check prerequisites.
6049

6050
    This checks that the instance and node names are valid.
6051

6052
    """
6053
    instance_name = self.op.instance_name
6054
    self.instance = self.cfg.GetInstanceInfo(instance_name)
6055
    assert self.instance is not None, \
6056
          "Cannot retrieve locked instance %s" % self.op.instance_name
6057
    _CheckNodeOnline(self, self.instance.primary_node)
6058

    
6059
    self.dst_node = self.cfg.GetNodeInfo(
6060
      self.cfg.ExpandNodeName(self.op.target_node))
6061

    
6062
    if self.dst_node is None:
6063
      # This is wrong node name, not a non-locked node
6064
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
6065
    _CheckNodeOnline(self, self.dst_node.name)
6066
    _CheckNodeNotDrained(self, self.dst_node.name)
6067

    
6068
    # instance disk type verification
6069
    for disk in self.instance.disks:
6070
      if disk.dev_type == constants.LD_FILE:
6071
        raise errors.OpPrereqError("Export not supported for instances with"
6072
                                   " file-based disks")
6073

    
6074
  def Exec(self, feedback_fn):
6075
    """Export an instance to an image in the cluster.
6076

6077
    """
6078
    instance = self.instance
6079
    dst_node = self.dst_node
6080
    src_node = instance.primary_node
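    # Outline of the steps below (descriptive comment, not in the original
    # source):
    #   1. optionally shut the instance down
    #   2. snapshot every disk into a new LV on the primary node
    #   3. restart the instance if it was shut down and is marked admin_up
    #   4. copy each snapshot to the target node, then drop the snapshot
    #   5. finalize the export on the target node
    #   6. remove any older export of this instance from the other nodes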
6081
    if self.op.shutdown:
6082
      # shutdown the instance, but not the disks
6083
      result = self.rpc.call_instance_shutdown(src_node, instance)
6084
      result.Raise()
6085
      if not result.data:
6086
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
6087
                                 (instance.name, src_node))
6088

    
6089
    vgname = self.cfg.GetVGName()
6090

    
6091
    snap_disks = []
6092

    
6093
    # set the disks ID correctly since call_instance_start needs the
6094
    # correct drbd minor to create the symlinks
6095
    for disk in instance.disks:
6096
      self.cfg.SetDiskID(disk, src_node)
6097

    
6098
    try:
6099
      for disk in instance.disks:
6100
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
6101
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
6102
        if new_dev_name.failed or not new_dev_name.data:
6103
          self.LogWarning("Could not snapshot block device %s on node %s",
6104
                          disk.logical_id[1], src_node)
6105
          snap_disks.append(False)
6106
        else:
6107
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
6108
                                 logical_id=(vgname, new_dev_name.data),
6109
                                 physical_id=(vgname, new_dev_name.data),
6110
                                 iv_name=disk.iv_name)
6111
          snap_disks.append(new_dev)
6112

    
6113
    finally:
6114
      if self.op.shutdown and instance.admin_up:
6115
        result = self.rpc.call_instance_start(src_node, instance, None)
6116
        msg = result.RemoteFailMsg()
6117
        if msg:
6118
          _ShutdownInstanceDisks(self, instance)
6119
          raise errors.OpExecError("Could not start instance: %s" % msg)
6120

    
6121
    # TODO: check for size
6122

    
6123
    cluster_name = self.cfg.GetClusterName()
6124
    for idx, dev in enumerate(snap_disks):
6125
      if dev:
6126
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
6127
                                               instance, cluster_name, idx)
6128
        if result.failed or not result.data:
6129
          self.LogWarning("Could not export block device %s from node %s to"
6130
                          " node %s", dev.logical_id[1], src_node,
6131
                          dst_node.name)
6132
        msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
6133
        if msg:
6134
          self.LogWarning("Could not remove snapshot block device %s from node"
6135
                          " %s: %s", dev.logical_id[1], src_node, msg)
6136

    
6137
    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
6138
    if result.failed or not result.data:
6139
      self.LogWarning("Could not finalize export for instance %s on node %s",
6140
                      instance.name, dst_node.name)
6141

    
6142
    nodelist = self.cfg.GetNodeList()
6143
    nodelist.remove(dst_node.name)
6144

    
6145
    # on one-node clusters nodelist will be empty after the removal
6146
    # if we proceed the backup would be removed because OpQueryExports
6147
    # substitutes an empty list with the full cluster node list.
6148
    if nodelist:
6149
      exportlist = self.rpc.call_export_list(nodelist)
6150
      for node in exportlist:
6151
        if exportlist[node].failed:
6152
          continue
6153
        if instance.name in exportlist[node].data:
6154
          if not self.rpc.call_export_remove(node, instance.name):
6155
            self.LogWarning("Could not remove older export for instance %s"
6156
                            " on node %s", instance.name, node)
6157

    
6158

    
6159
class LURemoveExport(NoHooksLU):
6160
  """Remove exports related to the named instance.
6161

6162
  """
6163
  _OP_REQP = ["instance_name"]
6164
  REQ_BGL = False
6165

    
6166
  def ExpandNames(self):
6167
    self.needed_locks = {}
6168
    # We need all nodes to be locked in order for RemoveExport to work, but we
6169
    # don't need to lock the instance itself, as nothing will happen to it (and
6170
    # we can remove exports also for a removed instance)
6171
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6172

    
6173
  def CheckPrereq(self):
6174
    """Check prerequisites.
6175
    """
6176
    pass
6177

    
6178
  def Exec(self, feedback_fn):
6179
    """Remove any export.
6180

6181
    """
6182
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
6183
    # If the instance was not found we'll try with the name that was passed in.
6184
    # This will only work if it was an FQDN, though.
6185
    fqdn_warn = False
6186
    if not instance_name:
6187
      fqdn_warn = True
6188
      instance_name = self.op.instance_name
6189

    
6190
    exportlist = self.rpc.call_export_list(self.acquired_locks[
6191
      locking.LEVEL_NODE])
6192
    found = False
6193
    for node in exportlist:
6194
      if exportlist[node].failed:
6195
        self.LogWarning("Failed to query node %s, continuing" % node)
6196
        continue
6197
      if instance_name in exportlist[node].data:
6198
        found = True
6199
        result = self.rpc.call_export_remove(node, instance_name)
6200
        if result.failed or not result.data:
6201
          logging.error("Could not remove export for instance %s"
6202
                        " on node %s", instance_name, node)
6203

    
6204
    if fqdn_warn and not found:
6205
      feedback_fn("Export not found. If trying to remove an export belonging"
6206
                  " to a deleted instance please use its Fully Qualified"
6207
                  " Domain Name.")
6208

    
6209

    
6210
class TagsLU(NoHooksLU):
6211
  """Generic tags LU.
6212

6213
  This is an abstract class which is the parent of all the other tags LUs.
6214

6215
  """
6216

    
6217
  def ExpandNames(self):
6218
    self.needed_locks = {}
6219
    if self.op.kind == constants.TAG_NODE:
6220
      name = self.cfg.ExpandNodeName(self.op.name)
6221
      if name is None:
6222
        raise errors.OpPrereqError("Invalid node name (%s)" %
6223
                                   (self.op.name,))
6224
      self.op.name = name
6225
      self.needed_locks[locking.LEVEL_NODE] = name
6226
    elif self.op.kind == constants.TAG_INSTANCE:
6227
      name = self.cfg.ExpandInstanceName(self.op.name)
6228
      if name is None:
6229
        raise errors.OpPrereqError("Invalid instance name (%s)" %
6230
                                   (self.op.name,))
6231
      self.op.name = name
6232
      self.needed_locks[locking.LEVEL_INSTANCE] = name
6233

    
6234
  def CheckPrereq(self):
6235
    """Check prerequisites.
6236

6237
    """
6238
    if self.op.kind == constants.TAG_CLUSTER:
6239
      self.target = self.cfg.GetClusterInfo()
6240
    elif self.op.kind == constants.TAG_NODE:
6241
      self.target = self.cfg.GetNodeInfo(self.op.name)
6242
    elif self.op.kind == constants.TAG_INSTANCE:
6243
      self.target = self.cfg.GetInstanceInfo(self.op.name)
6244
    else:
6245
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
6246
                                 str(self.op.kind))
6247

    
6248

    
6249
class LUGetTags(TagsLU):
6250
  """Returns the tags of a given object.
6251

6252
  """
6253
  _OP_REQP = ["kind", "name"]
6254
  REQ_BGL = False
6255

    
6256
  def Exec(self, feedback_fn):
6257
    """Returns the tag list.
6258

6259
    """
6260
    return list(self.target.GetTags())
6261

    
6262

    
6263
class LUSearchTags(NoHooksLU):
6264
  """Searches the tags for a given pattern.
6265

6266
  """
6267
  _OP_REQP = ["pattern"]
6268
  REQ_BGL = False
6269

    
6270
  def ExpandNames(self):
6271
    self.needed_locks = {}
6272

    
6273
  def CheckPrereq(self):
6274
    """Check prerequisites.
6275

6276
    This checks the pattern passed for validity by compiling it.
6277

6278
    """
6279
    try:
6280
      self.re = re.compile(self.op.pattern)
6281
    except re.error, err:
6282
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
6283
                                 (self.op.pattern, err))
6284

    
6285
  def Exec(self, feedback_fn):
6286
    """Returns the tag list.
6287

6288
    """
6289
    cfg = self.cfg
6290
    tgts = [("/cluster", cfg.GetClusterInfo())]
6291
    ilist = cfg.GetAllInstancesInfo().values()
6292
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
6293
    nlist = cfg.GetAllNodesInfo().values()
6294
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
6295
    results = []
6296
    for path, target in tgts:
6297
      for tag in target.GetTags():
6298
        if self.re.search(tag):
6299
          results.append((path, tag))
6300
    return results
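  # Illustrative note (not in the original source): the result is a list of
  # (path, tag) pairs over the cluster, instance and node objects, e.g.:
  #   [("/cluster", "production"), ("/instances/inst1.example.com", "web")]
  # with invented tag values.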
6301

    
6302

    
6303
class LUAddTags(TagsLU):
6304
  """Sets a tag on a given object.
6305

6306
  """
6307
  _OP_REQP = ["kind", "name", "tags"]
6308
  REQ_BGL = False
6309

    
6310
  def CheckPrereq(self):
6311
    """Check prerequisites.
6312

6313
    This checks the type and length of the tag name and value.
6314

6315
    """
6316
    TagsLU.CheckPrereq(self)
6317
    for tag in self.op.tags:
6318
      objects.TaggableObject.ValidateTag(tag)
6319

    
6320
  def Exec(self, feedback_fn):
6321
    """Sets the tag.
6322

6323
    """
6324
    try:
6325
      for tag in self.op.tags:
6326
        self.target.AddTag(tag)
6327
    except errors.TagError, err:
6328
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
6329
    try:
6330
      self.cfg.Update(self.target)
6331
    except errors.ConfigurationError:
6332
      raise errors.OpRetryError("There has been a modification to the"
6333
                                " config file and the operation has been"
6334
                                " aborted. Please retry.")
6335

    
6336

    
6337
class LUDelTags(TagsLU):
6338
  """Delete a list of tags from a given object.
6339

6340
  """
6341
  _OP_REQP = ["kind", "name", "tags"]
6342
  REQ_BGL = False
6343

    
6344
  def CheckPrereq(self):
6345
    """Check prerequisites.
6346

6347
    This checks that we have the given tag.
6348

6349
    """
6350
    TagsLU.CheckPrereq(self)
6351
    for tag in self.op.tags:
6352
      objects.TaggableObject.ValidateTag(tag)
6353
    del_tags = frozenset(self.op.tags)
6354
    cur_tags = self.target.GetTags()
6355
    if not del_tags <= cur_tags:
6356
      diff_tags = del_tags - cur_tags
6357
      diff_names = ["'%s'" % tag for tag in diff_tags]
6358
      diff_names.sort()
6359
      raise errors.OpPrereqError("Tag(s) %s not found" %
6360
                                 (",".join(diff_names)))
6361

    
6362
  def Exec(self, feedback_fn):
6363
    """Remove the tag from the object.
6364

6365
    """
6366
    for tag in self.op.tags:
6367
      self.target.RemoveTag(tag)
6368
    try:
6369
      self.cfg.Update(self.target)
6370
    except errors.ConfigurationError:
6371
      raise errors.OpRetryError("There has been a modification to the"
6372
                                " config file and the operation has been"
6373
                                " aborted. Please retry.")
6374

    
6375

    
6376
class LUTestDelay(NoHooksLU):
6377
  """Sleep for a specified amount of time.
6378

6379
  This LU sleeps on the master and/or nodes for a specified amount of
6380
  time.
6381

6382
  """
6383
  _OP_REQP = ["duration", "on_master", "on_nodes"]
6384
  REQ_BGL = False
6385

    
6386
  def ExpandNames(self):
6387
    """Expand names and set required locks.
6388

6389
    This expands the node list, if any.
6390

6391
    """
6392
    self.needed_locks = {}
6393
    if self.op.on_nodes:
6394
      # _GetWantedNodes can be used here, but is not always appropriate to use
6395
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
6396
      # more information.
6397
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
6398
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
6399

    
6400
  def CheckPrereq(self):
6401
    """Check prerequisites.
6402

6403
    """
6404

    
6405
  def Exec(self, feedback_fn):
6406
    """Do the actual sleep.
6407

6408
    """
6409
    if self.op.on_master:
6410
      if not utils.TestDelay(self.op.duration):
6411
        raise errors.OpExecError("Error during master delay test")
6412
    if self.op.on_nodes:
6413
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
6414
      if not result:
6415
        raise errors.OpExecError("Complete failure from rpc call")
6416
      for node, node_result in result.items():
6417
        node_result.Raise()
6418
        if not node_result.data:
6419
          raise errors.OpExecError("Failure during rpc call to node %s,"
6420
                                   " result: %s" % (node, node_result.data))
6421

    
6422

    
6423
class IAllocator(object):
6424
  """IAllocator framework.
6425

6426
  An IAllocator instance has four sets of attributes:
6427
    - cfg that is needed to query the cluster
6428
    - input data (all members of the _KEYS class attribute are required)
6429
    - four buffer attributes (in|out_data|text), that represent the
6430
      input (to the external script) in text and data structure format,
6431
      and the output from it, again in two formats
6432
    - the result variables from the script (success, info, nodes) for
6433
      easy usage
6434

6435
  """
6436
  _ALLO_KEYS = [
6437
    "mem_size", "disks", "disk_template",
6438
    "os", "tags", "nics", "vcpus", "hypervisor",
6439
    ]
6440
  _RELO_KEYS = [
6441
    "relocate_from",
6442
    ]
6443

    
6444
  def __init__(self, lu, mode, name, **kwargs):
6445
    self.lu = lu
6446
    # init buffer variables
6447
    self.in_text = self.out_text = self.in_data = self.out_data = None
6448
    # init all input fields so that pylint is happy
6449
    self.mode = mode
6450
    self.name = name
6451
    self.mem_size = self.disks = self.disk_template = None
6452
    self.os = self.tags = self.nics = self.vcpus = None
6453
    self.hypervisor = None
6454
    self.relocate_from = None
6455
    # computed fields
6456
    self.required_nodes = None
6457
    # init result fields
6458
    self.success = self.info = self.nodes = None
6459
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
6460
      keyset = self._ALLO_KEYS
6461
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
6462
      keyset = self._RELO_KEYS
6463
    else:
6464
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
6465
                                   " IAllocator" % self.mode)
6466
    for key in kwargs:
6467
      if key not in keyset:
6468
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
6469
                                     " IAllocator" % key)
6470
      setattr(self, key, kwargs[key])
6471
    for key in keyset:
6472
      if key not in kwargs:
6473
        raise errors.ProgrammerError("Missing input parameter '%s' to"
6474
                                     " IAllocator" % key)
6475
    self._BuildInputData()
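  # Illustrative usage sketch (not in the original source); an LU would
  # typically do something along these lines, with made-up values:
  #   ial = IAllocator(self, mode=constants.IALLOCATOR_MODE_ALLOC,
  #                    name="inst1.example.com", mem_size=512, disks=[...],
  #                    disk_template=constants.DT_DRBD8, os="debian-etch",
  #                    tags=[], nics=[...], vcpus=1, hypervisor=None)
  #   ial.Run("my-allocator-script")
  #   if not ial.success:
  #     raise errors.OpPrereqError("Allocator failed: %s" % ial.info)
  #   target_nodes = ial.nodes
  # The allocator script name and all parameter values are hypothetical.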
6476

    
6477
  def _ComputeClusterData(self):
6478
    """Compute the generic allocator input data.
6479

6480
    This is the data that is independent of the actual operation.
6481

6482
    """
6483
    cfg = self.lu.cfg
6484
    cluster_info = cfg.GetClusterInfo()
6485
    # cluster data
6486
    data = {
6487
      "version": 1,
6488
      "cluster_name": cfg.GetClusterName(),
6489
      "cluster_tags": list(cluster_info.GetTags()),
6490
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
6491
      # we don't have job IDs
6492
      }
6493
    iinfo = cfg.GetAllInstancesInfo().values()
6494
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
6495

    
6496
    # node data
6497
    node_results = {}
6498
    node_list = cfg.GetNodeList()
6499

    
6500
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
6501
      hypervisor_name = self.hypervisor
6502
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
6503
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
6504

    
6505
    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
6506
                                           hypervisor_name)
6507
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
6508
                       cluster_info.enabled_hypervisors)
6509
    for nname, nresult in node_data.items():
6510
      # first fill in static (config-based) values
6511
      ninfo = cfg.GetNodeInfo(nname)
6512
      pnr = {
6513
        "tags": list(ninfo.GetTags()),
6514
        "primary_ip": ninfo.primary_ip,
6515
        "secondary_ip": ninfo.secondary_ip,
6516
        "offline": ninfo.offline,
6517
        "drained": ninfo.drained,
6518
        "master_candidate": ninfo.master_candidate,
6519
        }
6520

    
6521
      if not ninfo.offline:
6522
        nresult.Raise()
6523
        if not isinstance(nresult.data, dict):
6524
          raise errors.OpExecError("Can't get data for node %s" % nname)
6525
        remote_info = nresult.data
6526
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
6527
                     'vg_size', 'vg_free', 'cpu_total']:
6528
          if attr not in remote_info:
6529
            raise errors.OpExecError("Node '%s' didn't return attribute"
6530
                                     " '%s'" % (nname, attr))
6531
          try:
6532
            remote_info[attr] = int(remote_info[attr])
6533
          except ValueError, err:
6534
            raise errors.OpExecError("Node '%s' returned invalid value"
6535
                                     " for '%s': %s" % (nname, attr, err))
6536
        # compute memory used by primary instances
6537
        i_p_mem = i_p_up_mem = 0
6538
        for iinfo, beinfo in i_list:
6539
          if iinfo.primary_node == nname:
6540
            i_p_mem += beinfo[constants.BE_MEMORY]
6541
            if iinfo.name not in node_iinfo[nname].data:
6542
              i_used_mem = 0
6543
            else:
6544
              i_used_mem = int(node_iinfo[nname].data[iinfo.name]['memory'])
6545
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
6546
            remote_info['memory_free'] -= max(0, i_mem_diff)
6547

    
6548
            if iinfo.admin_up:
6549
              i_p_up_mem += beinfo[constants.BE_MEMORY]
6550

    
6551
        # compute memory used by instances
6552
        pnr_dyn = {
6553
          "total_memory": remote_info['memory_total'],
6554
          "reserved_memory": remote_info['memory_dom0'],
6555
          "free_memory": remote_info['memory_free'],
6556
          "total_disk": remote_info['vg_size'],
6557
          "free_disk": remote_info['vg_free'],
6558
          "total_cpus": remote_info['cpu_total'],
6559
          "i_pri_memory": i_p_mem,
6560
          "i_pri_up_memory": i_p_up_mem,
6561
          }
6562
        pnr.update(pnr_dyn)
6563

    
6564
      node_results[nname] = pnr
6565
    data["nodes"] = node_results
6566

    
6567
    # instance data
6568
    instance_data = {}
6569
    for iinfo, beinfo in i_list:
6570
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
6571
                  for n in iinfo.nics]
6572
      pir = {
6573
        "tags": list(iinfo.GetTags()),
6574
        "admin_up": iinfo.admin_up,
6575
        "vcpus": beinfo[constants.BE_VCPUS],
6576
        "memory": beinfo[constants.BE_MEMORY],
6577
        "os": iinfo.os,
6578
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
6579
        "nics": nic_data,
6580
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
6581
        "disk_template": iinfo.disk_template,
6582
        "hypervisor": iinfo.hypervisor,
6583
        }
6584
      instance_data[iinfo.name] = pir
6585

    
6586
    data["instances"] = instance_data
6587

    
6588
    self.in_data = data
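  # Illustrative sketch (not in the original source) of the structure built
  # above and later serialized for the external allocator script:
  #   self.in_data = {
  #     "version": 1, "cluster_name": ..., "cluster_tags": [...],
  #     "enabled_hypervisors": [...],
  #     "nodes": {"node1": {"total_memory": ..., "free_disk": ..., ...}, ...},
  #     "instances": {"inst1": {"memory": ..., "disks": [...], ...}, ...}}
  # A "request" key is added later by _AddNewInstance/_AddRelocateInstance.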
6589

    
6590
  def _AddNewInstance(self):
6591
    """Add new instance data to allocator structure.
6592

6593
    This in combination with _AllocatorGetClusterData will create the
6594
    correct structure needed as input for the allocator.
6595

6596
    The checks for the completeness of the opcode must have already been
6597
    done.
6598

6599
    """
6600
    data = self.in_data
6601

    
6602
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)
6603

    
6604
    if self.disk_template in constants.DTS_NET_MIRROR:
6605
      self.required_nodes = 2
6606
    else:
6607
      self.required_nodes = 1
6608
    request = {
6609
      "type": "allocate",
6610
      "name": self.name,
6611
      "disk_template": self.disk_template,
6612
      "tags": self.tags,
6613
      "os": self.os,
6614
      "vcpus": self.vcpus,
6615
      "memory": self.mem_size,
6616
      "disks": self.disks,
6617
      "disk_space_total": disk_space,
6618
      "nics": self.nics,
6619
      "required_nodes": self.required_nodes,
6620
      }
6621
    data["request"] = request
6622

    
6623
  def _AddRelocateInstance(self):
6624
    """Add relocate instance data to allocator structure.
6625

6626
    This in combination with _IAllocatorGetClusterData will create the
6627
    correct structure needed as input for the allocator.
6628

6629
    The checks for the completeness of the opcode must have already been
6630
    done.
6631

6632
    """
6633
    instance = self.lu.cfg.GetInstanceInfo(self.name)
6634
    if instance is None:
6635
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
6636
                                   " IAllocator" % self.name)
6637

    
6638
    if instance.disk_template not in constants.DTS_NET_MIRROR:
6639
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
6640

    
6641
    if len(instance.secondary_nodes) != 1:
6642
      raise errors.OpPrereqError("Instance has not exactly one secondary node")
6643

    
6644
    self.required_nodes = 1
6645
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
6646
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
6647

    
6648
    request = {
6649
      "type": "relocate",
6650
      "name": self.name,
6651
      "disk_space_total": disk_space,
6652
      "required_nodes": self.required_nodes,
6653
      "relocate_from": self.relocate_from,
6654
      }
6655
    self.in_data["request"] = request
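  # Illustrative example (values invented) of the relocation request built
  # above:
  #   {"type": "relocate", "name": "inst1.example.com",
  #    "disk_space_total": 1152, "required_nodes": 1,
  #    "relocate_from": ["node2.example.com"]}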
6656

    
6657
  def _BuildInputData(self):
6658
    """Build input data structures.
6659

6660
    """
6661
    self._ComputeClusterData()
6662

    
6663
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
6664
      self._AddNewInstance()
6665
    else:
6666
      self._AddRelocateInstance()
6667

    
6668
    self.in_text = serializer.Dump(self.in_data)
6669

    
6670
  def Run(self, name, validate=True, call_fn=None):
6671
    """Run an instance allocator and return the results.
6672

6673
    """
6674
    if call_fn is None:
6675
      call_fn = self.lu.rpc.call_iallocator_runner
6676
    data = self.in_text
6677

    
6678
    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
6679
    result.Raise()
6680

    
6681
    if not isinstance(result.data, (list, tuple)) or len(result.data) != 4:
6682
      raise errors.OpExecError("Invalid result from master iallocator runner")
6683

    
6684
    rcode, stdout, stderr, fail = result.data
6685

    
6686
    if rcode == constants.IARUN_NOTFOUND:
6687
      raise errors.OpExecError("Can't find allocator '%s'" % name)
6688
    elif rcode == constants.IARUN_FAILURE:
6689
      raise errors.OpExecError("Instance allocator call failed: %s,"
6690
                               " output: %s" % (fail, stdout+stderr))
6691
    self.out_text = stdout
6692
    if validate:
6693
      self._ValidateResult()
6694

    
6695
  def _ValidateResult(self):
6696
    """Process the allocator results.
6697

6698
    This will process and if successful save the result in
6699
    self.out_data and the other parameters.
6700

6701
    """
6702
    try:
6703
      rdict = serializer.Load(self.out_text)
6704
    except Exception, err:
6705
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
6706

    
6707
    if not isinstance(rdict, dict):
6708
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
6709

    
6710
    for key in "success", "info", "nodes":
6711
      if key not in rdict:
6712
        raise errors.OpExecError("Can't parse iallocator results:"
6713
                                 " missing key '%s'" % key)
6714
      setattr(self, key, rdict[key])
6715

    
6716
    if not isinstance(rdict["nodes"], list):
6717
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
6718
                               " is not a list")
6719
    self.out_data = rdict
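  # Illustrative note (not in the original source): a well-formed allocator
  # reply therefore deserializes to something like:
  #   {"success": True, "info": "allocation successful",
  #    "nodes": ["node1.example.com", "node3.example.com"]}
  # with node names invented for the example.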
6720

    
6721

    
6722
class LUTestAllocator(NoHooksLU):
6723
  """Run allocator tests.
6724

6725
  This LU runs the allocator tests
6726

6727
  """
6728
  _OP_REQP = ["direction", "mode", "name"]
6729

    
6730
  def CheckPrereq(self):
6731
    """Check prerequisites.
6732

6733
    This checks the opcode parameters depending on the direction and mode.
6734

6735
    """
6736
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
6737
      for attr in ["name", "mem_size", "disks", "disk_template",
6738
                   "os", "tags", "nics", "vcpus"]:
6739
        if not hasattr(self.op, attr):
6740
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
6741
                                     attr)
6742
      iname = self.cfg.ExpandInstanceName(self.op.name)
6743
      if iname is not None:
6744
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
6745
                                   iname)
6746
      if not isinstance(self.op.nics, list):
6747
        raise errors.OpPrereqError("Invalid parameter 'nics'")
6748
      for row in self.op.nics:
6749
        if (not isinstance(row, dict) or
6750
            "mac" not in row or
6751
            "ip" not in row or
6752
            "bridge" not in row):
6753
          raise errors.OpPrereqError("Invalid contents of the"
6754
                                     " 'nics' parameter")
6755
      if not isinstance(self.op.disks, list):
6756
        raise errors.OpPrereqError("Invalid parameter 'disks'")
6757
      for row in self.op.disks:
6758
        if (not isinstance(row, dict) or
6759
            "size" not in row or
6760
            not isinstance(row["size"], int) or
6761
            "mode" not in row or
6762
            row["mode"] not in ['r', 'w']):
6763
          raise errors.OpPrereqError("Invalid contents of the"
6764
                                     " 'disks' parameter")
6765
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
6766
        self.op.hypervisor = self.cfg.GetHypervisorType()
6767
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
6768
      if not hasattr(self.op, "name"):
6769
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
6770
      fname = self.cfg.ExpandInstanceName(self.op.name)
6771
      if fname is None:
6772
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
6773
                                   self.op.name)
6774
      self.op.name = fname
6775
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
6776
    else:
6777
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
6778
                                 self.op.mode)
6779

    
6780
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
6781
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
6782
        raise errors.OpPrereqError("Missing allocator name")
6783
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
6784
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
6785
                                 self.op.direction)
6786

    
6787
  def Exec(self, feedback_fn):
6788
    """Run the allocator test.
6789

6790
    """
6791
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
6792
      ial = IAllocator(self,
6793
                       mode=self.op.mode,
6794
                       name=self.op.name,
6795
                       mem_size=self.op.mem_size,
6796
                       disks=self.op.disks,
6797
                       disk_template=self.op.disk_template,
6798
                       os=self.op.os,
6799
                       tags=self.op.tags,
6800
                       nics=self.op.nics,
6801
                       vcpus=self.op.vcpus,
6802
                       hypervisor=self.op.hypervisor,
6803
                       )
6804
    else:
6805
      ial = IAllocator(self,
6806
                       mode=self.op.mode,
6807
                       name=self.op.name,
6808
                       relocate_from=list(self.relocate_from),
6809
                       )
6810

    
6811
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
6812
      result = ial.in_text
6813
    else:
6814
      ial.Run(self.op.allocator, validate=False)
6815
      result = ial.out_text
6816
    return result