root / lib / cmdlib.py @ 9939547b

#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import time
import tempfile
import re
import platform
import logging
import copy
import random

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import serializer
from ganeti import ssconf


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity of the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing it separately is better because:

      - ExpandNames is left as a purely lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods no longer need to worry about missing parameters.

    """
    pass
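
  # As an illustration only (this override is hypothetical, not part of the
  # module): an LU with an optional boolean 'force' parameter could normalize
  # it here, so that CheckPrereq and Exec never see a missing attribute:
  #
  #   def CheckArguments(self):
  #     if not hasattr(self.op, "force"):
  #       self.op.force = False
  #     if not isinstance(self.op.force, bool):
  #       raise errors.OpPrereqError("Parameter 'force' must be a boolean")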

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level.

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have the 'GANETI_' prefix, as this
    will be handled in the hooks runner. Also note that additional keys
    will be added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    An absence of nodes should be expressed as an empty list (and not
    None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError
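
  # A hypothetical sketch of the three-element contract described above (the
  # opcode and its node_name parameter are invented for illustration):
  #
  #   def BuildHooksEnv(self):
  #     env = {"OP_TARGET": self.op.node_name}  # runner adds GANETI_ prefix
  #     nl = [self.cfg.GetMasterNode(), self.op.node_name]
  #     return env, nl, nl  # env dict, pre-exec nodes, post-exec nodes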

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged, but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to lock just some instances' nodes,
    or to lock just primary or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]
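

# A minimal skeleton tying the above contract together. This is purely
# illustrative (LUDoSomething is hypothetical, not part of this module); it
# shows the ExpandNames/DeclareLocks interplay that _LockInstancesNodes
# expects:
#
#   class LUDoSomething(LogicalUnit):
#     HPATH = "instance-do-something"
#     HTYPE = constants.HTYPE_INSTANCE
#     _OP_REQP = ["instance_name"]
#     REQ_BGL = False
#
#     def ExpandNames(self):
#       self._ExpandAndLockInstance()
#       # node locks are computed later, once the instance lock is held
#       self.needed_locks[locking.LEVEL_NODE] = []
#       self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
#
#     def DeclareLocks(self, level):
#       if level == locking.LEVEL_NODE:
#         self._LockInstancesNodes()
#
#     def CheckPrereq(self):
#       self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
#
#     def BuildHooksEnv(self):
#       env = _BuildInstanceHookEnvByObject(self, self.instance)
#       nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
#       return env, nl, nl
#
#     def Exec(self, feedback_fn):
#       feedback_fn("Would act on %s" % self.instance.name)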


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpPrereqError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = lu.cfg.GetInstanceList()
  return utils.NiceSort(wanted)
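

# Typical (illustrative) use from an LU's CheckPrereq, where self.op.names
# would be a possibly-empty list supplied by the caller (attribute name
# invented for illustration):
#
#   self.wanted = _GetWantedInstances(self, self.op.names)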


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set
  @param selected: the list of fields to check for validity

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta))


def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)))
  setattr(op, name, val)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node)


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance-related env variables for hooks.

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, bridge, mac) representing
      the NICs the instance has
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge, mac) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env
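

# For a hypothetical instance "inst1.example.com" with one NIC, the returned
# environment would look like this (all values invented for illustration):
#
#   {"OP_TARGET": "inst1.example.com",
#    "INSTANCE_NAME": "inst1.example.com",
#    "INSTANCE_PRIMARY": "node1.example.com",
#    "INSTANCE_SECONDARIES": "node2.example.com",
#    "INSTANCE_OS_TYPE": "debian-etch",
#    "INSTANCE_STATUS": "up",
#    "INSTANCE_MEMORY": 128,
#    "INSTANCE_VCPUS": 1,
#    "INSTANCE_NIC_COUNT": 1,
#    "INSTANCE_NIC0_IP": "",
#    "INSTANCE_NIC0_BRIDGE": "xen-br0",
#    "INSTANCE_NIC0_HWADDR": "aa:00:00:35:f2:01"}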


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance-related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


def _AdjustCandidatePool(lu):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool()
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               ", ".join(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _CheckInstanceBridgesExist(lu, instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
  result.Raise()
  if not result.data:
    raise errors.OpPrereqError("One or more target bridges %s do not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise()
    if not result.data:
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files,
                  drbd_map):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have
    @param drbd_map: the used drbd minors for this node, in
        the form of minor: (instance, must_exist), which correspond to
        instances and their running status

    """
    node = nodeinfo.name

    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = node_result.get('version', None)
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    vglist = node_result.get(constants.NV_VGLIST, None)
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum

    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      for file_name in file_list:
        node_is_mc = nodeinfo.master_candidate
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
          else:
            # not candidate and this is not a must-have file
            bad = True
            feedback_fn("  - ERROR: non master-candidate has old/wrong file"
                        " '%s'" % file_name)
        else:
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)

    # checks ssh to any

    if constants.NV_NODELIST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result[constants.NV_NODELIST]:
        bad = True
        for node in node_result[constants.NV_NODELIST]:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODELIST][node]))

    if constants.NV_NODENETTEST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result[constants.NV_NODENETTEST]:
        bad = True
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODENETTEST][node]))

    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))

    # check used drbd list
    used_minors = node_result.get(constants.NV_DRBDLIST, [])
    for minor, (iname, must_exist) in drbd_map.items():
      if minor not in used_minors and must_exist:
        feedback_fn("  - ERROR: drbd minor %d of instance %s is not active" %
                    (minor, iname))
        bad = True
    for minor in used_minors:
      if minor not in drbd_map:
        feedback_fn("  - ERROR: unallocated drbd minor %d is in use" % minor)
        bad = True

    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn, n_offline):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      if node in n_offline:
        # ignore missing volumes on offline nodes
        continue
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if instanceconfig.admin_up:
      if ((node_current not in node_instance or
          instance not in node_instance[node_current]) and
          node_current not in n_offline):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if node != node_current:
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to, should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad
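
  # Worked example of the check above (numbers invented): if node B has
  # mfree=1024 and is secondary for inst1 (memory 512) and inst2 (memory 768),
  # both with primary node A and auto_balance set, then needed_mem for the
  # (B, A) pair is 1280 > 1024, so losing node A would leave B unable to host
  # its failovers and the cluster is flagged as not N+1 compliant.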

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks are run only in the post phase; their failure makes
    the output be logged in the verify output and the verification fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
                        for iname in instancelist)
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    n_offline = [] # List of offline nodes
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    master_files = [constants.CLUSTER_CONF_FILE]

    file_names = ssconf.SimpleStore().GetFileList()
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.RAPI_CERT_FILE)
    file_names.extend(master_files)

    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    node_verify_param = {
      constants.NV_FILELIST: file_names,
      constants.NV_NODELIST: [node.name for node in nodeinfo
                              if not node.offline],
      constants.NV_HYPERVISOR: hypervisors,
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
                                  node.secondary_ip) for node in nodeinfo
                                 if not node.offline],
      constants.NV_LVLIST: vg_name,
      constants.NV_INSTANCELIST: hypervisors,
      constants.NV_VGLIST: None,
      constants.NV_VERSION: None,
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
      constants.NV_DRBDLIST: None,
      }
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())

    cluster = self.cfg.GetClusterInfo()
    master_node = self.cfg.GetMasterNode()
    all_drbd_map = self.cfg.ComputeDRBDMap()

    for node_i in nodeinfo:
      node = node_i.name
      nresult = all_nvinfo[node].data

      if node_i.offline:
        feedback_fn("* Skipping offline node %s" % (node,))
        n_offline.append(node)
        continue

      if node == master_node:
        ntype = "master"
      elif node_i.master_candidate:
        ntype = "master candidate"
      else:
        ntype = "regular"
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))

      if all_nvinfo[node].failed or not isinstance(nresult, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_drbd = {}
      for minor, instance in all_drbd_map[node].items():
        instance = instanceinfo[instance]
        node_drbd[minor] = (instance.name, instance.admin_up)
      result = self._VerifyNode(node_i, file_names, local_checksums,
                                nresult, feedback_fn, master_files,
                                node_drbd)
      bad = bad or result

      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
      if isinstance(lvdata, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, lvdata.encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(lvdata, dict):
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = lvdata

      # node_instance
      idata = nresult.get(constants.NV_INSTANCELIST, None)
      if not isinstance(idata, list):
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
                    (node,))
        bad = True
        continue

      node_instance[node] = idata

      # node_info
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nresult[constants.NV_VGLIST][vg_name]),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = instanceinfo[instance]
      result = self._VerifyInstance(instance, inst_config, node_volume,
                                    node_instance, feedback_fn, n_offline)
      bad = bad or result
      inst_nodes_offline = []

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      elif pnode not in n_offline:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      if pnode in n_offline:
        inst_nodes_offline.append(pnode)

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        elif snode not in n_offline:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))
          bad = True
        if snode in n_offline:
          inst_nodes_offline.append(snode)

      if inst_nodes_offline:
        # warn that the instance lives on offline nodes, and set bad=True
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
                    ", ".join(inst_nodes_offline))
        bad = True

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    if n_offline:
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))

    return not bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result.

    This method analyzes the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res.failed or res.data is False or not isinstance(res.data, list):
            if res.offline:
              # no need to warn or set fail return value
              continue
            feedback_fn("    Communication failure in hooks execution")
            lu_result = 1
            continue
          for script, hkr, output in res.data:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    to_act = set()
    for node in nodes:
      # node_volume
      lvs = node_lvs[node]
      if lvs.failed:
        if not lvs.offline:
          self.LogWarning("Connection to node %s failed: %s" %
                          (node, lvs.data))
        continue
      lvs = lvs.data
      if isinstance(lvs, basestring):
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
        res_nlvm[node] = lvs
      elif not isinstance(lvs, dict):
        logging.warning("Connection to node %s failed or invalid data"
                        " returned", node)
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
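

# For reference, the result quadruple above unpacks as follows (all values
# hypothetical, for illustration only):
#
#   res_nodes = ["node3"]                        # unreachable/bad nodes
#   res_nlvm = {"node2": "volume group missing"} # per-node LVM errors
#   res_instances = ["inst1"]                    # instances with offline LVs
#   res_missing = {"inst2": [("node1", "xenvg/disk0_data")]}  # missing LVs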


class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    if result.failed or not result.data:
      raise errors.OpExecError("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed",
                        constants.SSH_KNOWN_HOSTS_FILE, to_node)

    finally:
      result = self.rpc.call_node_start_master(master, False)
      if result.failed or not result.data:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually.")


def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether an LD_LV dev_type was found or not

  """
  if disk.children:
    for chdisk in disk.children:
      if _RecursiveCheckIfLVMBased(chdisk):
        return True
  return disk.dev_type == constants.LD_LV


class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def CheckParameters(self):
    """Check parameters.

    """
    if not hasattr(self.op, "candidate_pool_size"):
      self.op.candidate_pool_size = None
    if self.op.candidate_pool_size is not None:
      try:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                   str(err))
      if self.op.candidate_pool_size < 1:
        raise errors.OpPrereqError("At least one master candidate needed")

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    # FIXME: This only works because there is only one parameter that can be
    # changed or removed.
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        if vglist[node].failed:
          # ignoring down node
          self.LogWarning("Node %s unreachable/error, ignoring" % node)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate beparams changes
    if self.op.beparams:
      utils.CheckBEParams(self.op.beparams)
      self.new_beparams = cluster.FillDict(
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)

    # hypervisor list/parameters
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      if self.op.vg_name != self.cfg.GetVGName():
        self.cfg.SetVGName(self.op.vg_name)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size

    self.cfg.Update(self.cluster)

    # we want to update nodes after the cluster so that if any errors
    # happen, we have recorded and saved the cluster info
    if self.op.candidate_pool_size is not None:
      _AdjustCandidatePool(self)
1469

    
1470

    
1471
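# Illustrative sketch (not part of the original module): the hvparams merge
# above is a per-hypervisor dict update, so keys missing from the opcode keep
# their cluster-wide values. Assuming a cluster where
#   cluster.hvparams["xen-pvm"] == {"kernel_path": "/boot/vmlinuz-2.6-xenU",
#                                   "root_path": "/dev/sda"}
# an opcode carrying hvparams={"xen-pvm": {"root_path": "/dev/xvda"}} would
# yield
#   new_hvparams["xen-pvm"] == {"kernel_path": "/boot/vmlinuz-2.6-xenU",
#                               "root_path": "/dev/xvda"}
# (the example paths are assumptions for illustration only).

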
class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo())


def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if rstats.failed or not rstats.data:
      lu.LogWarning("Can't get any data from node %s", node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.data
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                      node, instance.disks[i].iv_name)
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


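# Illustrative note for _WaitForSync (an assumption based on the unpacking
# above, not an authoritative API description): each entry returned by
# call_blockdev_getmirrorstatus is a 4-tuple
#   (sync_percent, estimated_time, is_degraded, ldisk)
# where sync_percent is None once the device is fully synced. A typical
# caller sketch:
#   if not _WaitForSync(self, instance):
#     raise errors.OpExecError("some disks of the instance are degraded")

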
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)
  if ldisk:
    idx = 6
  else:
    idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    if rstats.failed or not rstats.data:
      logging.warning("Node %s: disk degraded, not found or node down", node)
      result = False
    else:
      result = result and (not rstats.data[idx])
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


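# Illustrative note (assumption derived from the idx values above): the
# status tuple returned by call_blockdev_find carries the overall
# is_degraded flag at index 5 and the local-storage (ldisk) flag at index 6,
# so callers choose the check they need, e.g.:
#   _CheckDiskConsistency(self, dev, node, False)              # is_degraded
#   _CheckDiskConsistency(self, dev, node, False, ldisk=True)  # local storage

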
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @returns: a dictionary with osnames as keys and as value another map, with
        nodes as keys and list of OS objects as values, eg::

          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}
          }

    """
    all_os = {}
    for node_name, nr in rlist.iteritems():
      if nr.failed or not nr.data:
        continue
      for os_obj in nr.data:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in node_list:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()
                   if node in node_list]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(valid_nodes, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output


class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)


class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal",
    )

  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    "offline",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in _GetWantedNodes if the
    # list is non-empty; if it is empty, there is no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_locking:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.failed and nodeinfo.data:
          nodeinfo = nodeinfo.data
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field == "master_candidate":
          val = node.master_candidate
        elif field == "master":
          val = node.name == master_node
        elif field == "offline":
          val = node.offline
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output


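# Client-side sketch (hypothetical usage, not part of this module): the LU
# above backs the node list query and would be driven by an opcode such as
#   op = opcodes.OpQueryNodes(output_fields=["name", "mfree", "pinst_cnt"],
#                             names=[])
# an empty names list selects all nodes, and requesting only fields from
# _FIELDS_STATIC avoids taking node locks (see do_locking above).

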
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      if node not in volumes or volumes[node].failed or not volumes[node].data:
        continue

      node_vols = volumes[node].data[:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output


class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
    mc_now, _ = self.cfg.GetMasterCandidateStats()
    master_candidate = mc_now < cp_size

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip,
                                 master_candidate=master_candidate,
                                 offline=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise()
    if result.data:
      if constants.PROTOCOL_VERSION == result.data:
        logging.info("Communication to node %s fine, sw version %s match",
                     node, result.data)
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result.data))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logging.info("Copy ssh key to node %s", node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    if result.failed or not result.data:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      if result.failed or not result.data:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if result[verifier].failed or not result[verifier].data:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier].data['nodelist']:
        for failed in result[verifier].data['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier].data['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logging.debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed", fname, to_node)

    to_copy = []
    if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
      to_copy.append(constants.VNC_PASSWORD_FILE)
    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      if result[node].failed or not result[node].data:
        logging.error("Could not copy file %s to node %s", fname, node)

    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)


class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    if self.op.master_candidate is None and self.op.offline is None:
      raise errors.OpPrereqError("Please pass at least one modification")
    if self.op.offline == True and self.op.master_candidate == True:
      raise errors.OpPrereqError("Can't set the node into offline and"
                                 " master_candidate at the same time")

  def ExpandNames(self):
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the requested flags are consistent with the node's
    current role, e.g. that the master node cannot be demoted or offlined.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if ((self.op.master_candidate == False or self.op.offline == True)
        and node.master_candidate):
      # we will demote the node from master_candidate
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master node has to be a"
                                   " master candidate and online")
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
      if num_candidates <= cp_size:
        msg = ("Not enough master candidates (desired"
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
        if self.op.force:
          self.LogWarning(msg)
        else:
          raise errors.OpPrereqError(msg)

    if (self.op.master_candidate == True and node.offline and
        not self.op.offline == False):
      raise errors.OpPrereqError("Can't set an offline node to"
                                 " master_candidate")

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node

    result = []

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True and node.master_candidate:
        node.master_candidate = False
        result.append(("master_candidate", "auto-demotion due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        if (rrc.failed or not isinstance(rrc.data, (tuple, list))
            or len(rrc.data) != 2):
          self.LogWarning("Node rpc error: %s" % rrc.error)
        elif not rrc.data[0]:
          self.LogWarning("Node failed to demote itself: %s" % rrc.data[1])

    # this will trigger configuration file update, if needed
    self.cfg.Update(node)
    # this will trigger job queue propagation or cleanup
    if self.op.node_name != self.cfg.GetMasterNode():
      self.context.ReaddNode(node)

    return result


class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.default_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": cluster.hvparams,
      "beparams": cluster.beparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      }

    return result


class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values


class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @return: a tuple of (disks_ok, device_info); device_info is a list of
      (host, instance_visible_name, node_visible_name) tuples with the
      mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      if result.failed or not result.data:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1)",
                           inst_disk.iv_name, node)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      if result.failed or not result.data:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2)",
                           inst_disk.iv_name, node)
        disks_ok = False
    device_info.append((instance.primary_node, inst_disk.iv_name, result.data))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info


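# Usage sketch (illustrative): callers normally unpack the result as
#   disks_ok, device_info = _AssembleInstanceDisks(self, instance)
# and treat a false disks_ok as fatal (see LUActivateInstanceDisks.Exec
# above); device_info lists (node, iv_name, device) tuples mapping node
# devices to instance devices.

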
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
                                           ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    """
    instance = self.instance
    _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
                                    [instance.hypervisor])
  ins_l = ins_l[instance.primary_node]
  if ins_l.failed or not isinstance(ins_l.data, list):
    raise errors.OpExecError("Can't contact node '%s'" %
                             instance.primary_node)

  if instance.name in ins_l.data:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)


def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are ignored;
  otherwise a failure on any node makes the function return failure.

  """
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      if result.failed or not result.data:
        logging.error("Could not shutdown block device %s on node %s",
                      disk.iv_name, node)
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result


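# Semantics note (derived from the loop above): a shutdown failure on any
# node flips the return value to False, unless ignore_primary is set and
# the failing node is the instance's primary node, e.g.:
#   _ShutdownInstanceDisks(lu, instance)                       # strict
#   _ShutdownInstanceDisks(lu, instance, ignore_primary=True)  # tolerate the
#                                                              # primary node

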
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
  nodeinfo[node].Raise()
  free_mem = nodeinfo[node].data.get('memory_free')
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem))


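# Usage sketch (illustrative; it mirrors the call in LUStartupInstance
# below):
#   _CheckNodeFreeMemory(self, instance.primary_node,
#                        "starting instance %s" % instance.name,
#                        bep[constants.BE_MEMORY], instance.hypervisor)
# This raises OpPrereqError when the node reports less free memory than the
# instance's configured BE_MEMORY.

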
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

    _CheckNodeFreeMemory(self, instance.primary_node,
                         "starting instance %s" % instance.name,
                         bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")

    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    result = self.rpc.call_instance_start(node_current, instance, extra_args)
    msg = result.RemoteFailMsg()
    if msg:
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance: %s" % msg)


class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type, extra_args)
      if result.failed or not result.data:
        raise errors.OpExecError("Could not reboot instance")
    else:
      if not self.rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("Could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, extra_args)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)


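# Semantics note (a summary of the Exec branches above): soft and hard
# reboots are delegated to the hypervisor via call_instance_reboot, while a
# full reboot is emulated as shutdown + disk deactivation + disk activation
# + start; that is why only the full-reboot path can honour the
# ignore_secondaries flag.

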
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    self.cfg.MarkInstanceDown(instance.name)
    result = self.rpc.call_instance_shutdown(node_current, instance)
    if result.failed or not result.data:
      self.proc.LogWarning("Could not shutdown instance")

    _ShutdownInstanceDisks(self, instance)


class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info.failed or remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise()
      if not isinstance(result.data, objects.OS):
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      result = self.rpc.call_instance_os_add(inst.primary_node, inst)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s: %s" %
                                 (inst.name, inst.primary_node, msg))
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result.data[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.RemoteFailMsg()
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
    if result.failed or not result.data:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance")
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))

    logging.info("Removing block devices for instance %s", instance.name)

    if not _RemoveDisks(self, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", instance.name)

    self.cfg.RemoveInstance(instance.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name


class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state", "admin_ram",
                                    "disk_template", "ip", "mac", "bridge",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    "(disk).(size)/([0-9]+)",
                                    "(disk).(sizes)",
                                    "(nic).(mac|ip|bridge)/([0-9]+)",
                                    "(nic).(macs|ips|bridges)",
                                    "(disk|nic).(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")

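  # Illustrative note (not part of the original source): the parenthesised
  # entries above are pattern fields; assuming utils.FieldSet.Matches()
  # returns a standard regex match object, a query field such as
  # "disk.size/0" yields the groups consumed in Exec below:
  #
  #   >>> m = _FIELDS_STATIC.Matches("disk.size/0")
  #   >>> m.groups()
  #   ('disk', 'size', '0')
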
  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.do_locking:
      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
    elif self.wanted != locking.ALL_SET:
      instance_names = self.wanted
      missing = set(instance_names).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some instances were removed before retrieving their data: %s"
          % missing)
    else:
      instance_names = all_info.keys()

    instance_names = utils.NiceSort(instance_names)
    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    off_nodes = []
    if self.do_locking:
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.failed:
          bad_nodes.append(name)
        else:
          if result.data:
            live_data.update(result.data)
            # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "bridges":
              val = [nic.bridge for nic in instance.nics]
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "bridge":
                  val = instance.nics[nic_idx].bridge
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, "Unhandled variable parameter"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output


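# Illustrative summary (not part of the original source) of the "status"
# field computed in LUQueryInstances.Exec above:
#
#   node state    running   admin_up   status
#   -----------   -------   --------   -----------------
#   offline       n/a       n/a        ERROR_nodeoffline
#   unreachable   n/a       n/a        ERROR_nodedown
#   healthy       yes       yes        running
#   healthy       yes       no         ERROR_up
#   healthy       no        yes        ERROR_down
#   healthy       no        no         ADMIN_down

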
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, bep[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One or more target bridges %s do not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    if result.failed or not result.data:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding"
                             " anyway. Please make sure node %s is down",
                             instance.name, source_node, source_node)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUMigrateInstance(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "live", "cleanup"]

  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "drbd8 disk template")

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    if result.failed or not result.data:
      raise errors.OpPrereqError("One or more target bridges %s do not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    if not self.op.cleanup:
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
                                   msg)

    self.instance = instance

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        msg = nres.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
                                   (node, msg))
        node_done, node_percent = nres.data[1]
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
                               " error %s" % (node, msg))

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot disconnect disks node %s,"
                                 " error %s" % (node, msg))

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot change disks config on node %s,"
                                 " error: %s" % (node, msg))

  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise()
      if not isinstance(result.data, list):
        raise errors.OpExecError("Can't contact node '%s'" % node)

    runningon_source = instance.name in ins_l[source_node].data
    runningon_target = instance.name in ins_l[target_node].data

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore errors here, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.LogWarning("Migration failed and I can't reconnect the"
                      " drives: error '%s'\n"
                      "Please look and recover the instance status" %
                      str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.RemoteFailMsg()
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s" %
                    (target_node, abort_msg))
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.

    The migration is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                  (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.data[1]

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.op.live)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s" % msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def Exec(self, feedback_fn):
    """Perform the migration.

    """
    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node
    self.target_node = self.instance.secondary_nodes[0]
    self.all_nodes = [self.source_node, self.target_node]
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }
    if self.op.cleanup:
      return self._ExecCleanup()
    else:
      return self._ExecMigration()


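# Illustrative sketch (not part of the original source): the DRBD mode
# transitions driven by the LUMigrateInstance helpers above. Both the
# migration and the cleanup paths walk the disks through the same cycle:
#
#   _EnsureSecondary(node)  # close the device on the demoted node
#   _GoStandalone()         # disconnect both sides from the network
#   _GoReconnect(mm)        # reattach, dual-master only during the copy
#   _WaitUntilSync()        # poll until resync reaches 100%
#
# A live migration needs dual-master mode while the hypervisor moves the
# instance, and finishes by repeating the cycle back to single-master.

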
def _CreateBlockDev(lu, node, instance, device, force_create,
                    info, force_open):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device which has
      CreateOnSecondary() attribute
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.CreateBlockDevice} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  if device.CreateOnSecondary():
    force_create = True

  if device.children:
    for child in device.children:
      _CreateBlockDev(lu, node, instance, child, force_create,
                      info, force_open)

  if not force_create:
    return

  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)


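# Illustrative example (not part of the original source): for a DRBD8 disk
# the recursion above visits the two LV children (data and metadata) before
# creating the DRBD device itself; assuming both device types report
# CreateOnSecondary(), force_create becomes True for the whole subtree and
# every component is created on the secondary node as well.
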
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created in advance.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.CreateBlockDevice} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  lu.cfg.SetDiskID(device, node)
  result = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, force_open, info)
  msg = result.RemoteFailMsg()
  if msg:
    raise errors.OpExecError("Can't create block device %s on"
                             " node %s for instance %s: %s" %
                             (device, node, instance.name, msg))
  if device.physical_id is None:
    device.physical_id = result.data[1]


def _GenerateUniqueNames(lu, exts):
  """Generate a suitable LV name.

  This will generate a logical volume name for the given instance.

  """
  results = []
  for val in exts:
    new_id = lu.cfg.GenerateUniqueID()
    results.append("%s%s" % (new_id, val))
  return results


def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  shared_secret = lu.cfg.GenerateDRBDSecret()
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                          logical_id=(primary, secondary, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name)
  return drbd_dev


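# Illustrative sketch (not part of the original source): the device tree
# returned by _GenerateDRBD8Branch above for a disk of SIZE MB:
#
#   DRBD8  size=SIZE  logical_id=(primary, secondary, port,
#                                 p_minor, s_minor, shared_secret)
#    +- LV data  size=SIZE    logical_id=(vgname, names[0])
#    +- LV meta  size=128 MB  logical_id=(vgname, names[1])
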
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % i
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vgname, names[idx]),
                              iv_name="disk/%d" % disk_index)
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         idx)))
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks


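# Illustrative example (not part of the original source): for a DT_DRBD8
# instance with two disks, _GenerateUniqueNames returns IDs of the form
# "<uuid>.disk0" and "<uuid>.disk1", which the loop above expands into the
# LV name pairs
#
#   ["<uuid>.disk0_data", "<uuid>.disk0_meta",
#    "<uuid>.disk1_data", "<uuid>.disk1_meta"]
#
# with iv_names "disk/0" and "disk/1" (shifted by base_index when disks
# are added to an existing instance).
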
def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name


def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @raise errors.OpExecError: if the creation of any disk fails

  """
  info = _GetInstanceInfoText(instance)
  pnode = instance.primary_node

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    if result.failed or not result.data:
      raise errors.OpExecError("Could not connect to node '%s'" % pnode)

    if not result.data[0]:
      raise errors.OpExecError("Failed to create directory '%s'" %
                               file_storage_dir)

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for device in instance.disks:
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    for node in instance.all_nodes:
      f_create = node == pnode
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)


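# Note (illustrative, not part of the original source): in _CreateDisks
# above, f_create serves as both force_create and force_open, so the
# primary node gets every device created and opened unconditionally, while
# secondary nodes only create the components whose device type demands it
# (via CreateOnSecondary()) and keep them closed.
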
def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  result = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(disk, node)
      result = lu.rpc.call_blockdev_remove(node, disk)
      if result.failed or not result.data:
        lu.proc.LogWarning("Could not remove block device %s on node %s,"
                           " continuing anyway", device.iv_name, node)
        result = False

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                                 file_storage_dir)
    if result.failed or not result.data:
      logging.error("Could not remove directory '%s'", file_storage_dir)
      result = False

  return result


def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group

  """
  # Required free disk space as a function of the disk template and
  # the disk sizes
  req_size_dict = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: sum(d["size"] for d in disks),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
    constants.DT_FILE: None,
  }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" %  disk_template)

  return req_size_dict[disk_template]


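# Worked example (illustrative, not part of the original source): for two
# disks of 1024 MB each, _ComputeDiskSize returns
#
#   DT_PLAIN:     1024 + 1024       = 2048 MB of volume group space
#   DT_DRBD8:     (1024 + 128) * 2  = 2304 MB (DRBD metadata included)
#   DT_DISKLESS:  None (no volume group space needed)
#   DT_FILE:      None (no volume group space needed)
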
def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstracts the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
                                                  hvname,
                                                  hvparams)
  for node in nodenames:
    info = hvinfo[node]
    info.Raise()
    if not info.data or not isinstance(info.data, (tuple, list)):
      raise errors.OpPrereqError("Cannot get current information"
                                 " from node '%s' (%s)" % (node, info.data))
    if not info.data[0]:
      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
                                 " %s" % info.data[1])


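# Note (illustrative, not part of the original source): the validation RPC
# used above is expected to return a (success, message) pair per node,
# which is why _CheckHVParams inspects info.data[0] and info.data[1].
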
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disks", "disk_template",
              "mode", "start",
              "wait_for_sync", "ip_check", "nics",
              "hvparams", "beparams"]
  REQ_BGL = False

  def _ExpandNode(self, node):
    """Expands and checks one node name.

    """
    node_full = self.cfg.ExpandNodeName(node)
    if node_full is None:
      raise errors.OpPrereqError("Unknown node %s" % node)
    return node_full

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)

    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)

    # fill and remember the beparams dict
    utils.CheckBEParams(self.op.beparams)
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                    self.op.beparams)

    #### instance parameters check

    # instance name verification
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # NIC buildup
    self.nics = []
    for nic in self.op.nics:
      # ip validity checks
      ip = nic.get("ip", None)
      if ip is None or ip.lower() == "none":
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        nic_ip = hostname1.ip
      else:
        if not utils.IsValidIP(ip):
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
                                     " like a valid IP" % ip)
        nic_ip = ip

      # MAC address verification
      mac = nic.get("mac", constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        if not utils.IsValidMac(mac.lower()):
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                                     mac)
      # bridge verification
      bridge = nic.get("bridge", None)
      if bridge is None:
        bridge = self.cfg.GetDefBridge()
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))

    # disk checks/pre-build
    self.disks = []
    for disk in self.op.disks:
      mode = disk.get("mode", constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                   mode)
      size = disk.get("size", None)
      if size is None:
        raise errors.OpPrereqError("Missing disk size")
      try:
        size = int(size)
      except ValueError:
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
      self.disks.append({"size": size, "mode": mode})

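    # Illustrative example (not part of the original source): the nics and
    # disks specifications parsed above are plain dicts, e.g.
    #
    #   self.op.nics  = [{"ip": "auto", "mac": "auto", "bridge": None}]
    #   self.op.disks = [{"size": "10240", "mode": "rw"}]
    #
    # where "auto" derives the NIC IP from the instance's hostname, a
    # missing bridge falls back to the cluster default bridge, and disk
    # sizes are validated as integer MB values.
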
    # used in CheckPrereq for ip ping check
4218
    self.check_ip = hostname1.ip
4219

    
4220
    # file storage checks
4221
    if (self.op.file_driver and
4222
        not self.op.file_driver in constants.FILE_DRIVER):
4223
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
4224
                                 self.op.file_driver)
4225

    
4226
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
4227
      raise errors.OpPrereqError("File storage directory path not absolute")
4228

    
4229
    ### Node/iallocator related checks
4230
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
4231
      raise errors.OpPrereqError("One and only one of iallocator and primary"
4232
                                 " node must be given")
4233

    
4234
    if self.op.iallocator:
4235
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4236
    else:
4237
      self.op.pnode = self._ExpandNode(self.op.pnode)
4238
      nodelist = [self.op.pnode]
4239
      if self.op.snode is not None:
4240
        self.op.snode = self._ExpandNode(self.op.snode)
4241
        nodelist.append(self.op.snode)
4242
      self.needed_locks[locking.LEVEL_NODE] = nodelist
4243

    
4244
    # in case of import lock the source node too
4245
    if self.op.mode == constants.INSTANCE_IMPORT:
4246
      src_node = getattr(self.op, "src_node", None)
4247
      src_path = getattr(self.op, "src_path", None)
4248

    
4249
      if src_path is None:
4250
        self.op.src_path = src_path = self.op.instance_name
4251

    
4252
      if src_node is None:
4253
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4254
        self.op.src_node = None
4255
        if os.path.isabs(src_path):
4256
          raise errors.OpPrereqError("Importing an instance from an absolute"
4257
                                     " path requires a source node option.")
4258
      else:
4259
        self.op.src_node = src_node = self._ExpandNode(src_node)
4260
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
4261
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
4262
        if not os.path.isabs(src_path):
4263
          self.op.src_path = src_path = \
4264
            os.path.join(constants.EXPORT_DIR, src_path)
4265

    
4266
    else: # INSTANCE_CREATE
4267
      if getattr(self.op, "os_type", None) is None:
4268
        raise errors.OpPrereqError("No guest OS specified")
4269

    
4270
  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    nics = [n.ToDict() for n in self.nics]
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_ALLOC,
                     name=self.op.instance_name,
                     disk_template=self.op.disk_template,
                     tags=[],
                     os=self.op.os_type,
                     vcpus=self.be_full[constants.BE_VCPUS],
                     mem_size=self.be_full[constants.BE_MEMORY],
                     disks=self.disks,
                     nics=nics,
                     hypervisor=self.op.hypervisor,
                     )

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.pnode = ial.nodes[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 ", ".join(ial.nodes))
    if ial.required_nodes == 2:
      self.op.snode = ial.nodes[1]

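  # Added note (not in the original code): on success ial.nodes is expected
  # to be a list of node names, e.g. ["node1.example.com",
  # "node2.example.com"] for a two-node (DRBD-style) request; index 0 becomes
  # the primary and, when required_nodes == 2, index 1 the secondary.
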
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": ",".join(str(d["size"]) for d in self.disks),
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGES"] = self.src_images

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=[(n.ip, n.bridge, n.mac) for n in self.nics],
    ))

    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl


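  # Added note (not in the original code): the env dict returned above ends
  # up in the hook scripts' environment; in Ganeti the hooks runner
  # conventionally prefixes the keys (e.g. GANETI_INSTANCE_ADD_MODE) -- the
  # exact prefixing is assumed here, not shown in this file.
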
  def CheckPrereq(self):
    """Check prerequisites.

    """
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")


    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_node is None:
        exp_list = self.rpc.call_export_list(
          self.acquired_locks[locking.LEVEL_NODE])
        found = False
        for node in exp_list:
          if not exp_list[node].failed and src_path in exp_list[node].data:
            found = True
            self.op.src_node = src_node = node
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
                                                       src_path)
            break
        if not found:
          raise errors.OpPrereqError("No export found for relative path %s" %
                                      src_path)

      _CheckNodeOnline(self, src_node)
      result = self.rpc.call_export_info(src_node, src_path)
      result.Raise()
      if not result.data:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      export_info = result.data
      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # Check that the new instance doesn't have fewer disks than the export
      instance_disks = len(self.disks)
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (instance_disks, export_disks))

      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = os.path.join(src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      old_name = export_info.get(constants.INISECT_INS, 'name')
      # FIXME: int() here could throw a ValueError on broken exports
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
      if self.op.instance_name == old_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
            nic_mac_ini = 'nic%d_mac' % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name)

    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(self.op.snode)
      _CheckNodeOnline(self, self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.disks)

    # Check lv size requirements
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo[node]
        info.Raise()
        info = info.data
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
    result.Raise()
    if not isinstance(result.data, objects.OS):
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # bridge check on primary node
    bridges = [n.bridge for n in self.nics]
    result = self.rpc.call_bridges_exist(self.pnode.name, bridges)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One of the target bridges '%s' does not"
                                 " exist on destination node '%s'" %
                                 (",".join(bridges), pnode.name))

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)

    self.instance_status = self.op.start

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC()

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    ##if self.op.vnc_bind_address is None:
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance))

    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries,
                                  self.disks,
                                  file_storage_dir,
                                  self.op.file_driver,
                                  0)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            admin_up=self.instance_status,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            )

    feedback_fn("* creating instance disks...")
    try:
      _CreateDisks(self, iobj)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, iobj)
      finally:
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Unlock all the nodes
    if self.op.mode == constants.INSTANCE_IMPORT:
      nodes_keep = [self.op.src_node]
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
                       if node != self.op.src_node]
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
    else:
      self.context.glm.release(locking.LEVEL_NODE)
      del self.acquired_locks[locking.LEVEL_NODE]

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        result = self.rpc.call_instance_os_add(pnode_name, iobj)
        msg = result.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Could not add os for instance %s"
                                   " on node %s: %s" %
                                   (instance, pnode_name, msg))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_images = self.src_images
        cluster_name = self.cfg.GetClusterName()
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
                                                         src_node, src_images,
                                                         cluster_name)
        import_result.Raise()
        for idx, result in enumerate(import_result.data):
          if not result:
            self.LogWarning("Could not import the image %s for instance"
                            " %s, disk %d, on node %s" %
                            (src_images[idx], instance, idx, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, iobj, None)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not start instance: %s" % msg)


class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    instance = self.instance
    node = instance.primary_node

    node_insts = self.rpc.call_instance_list([node],
                                             [instance.hypervisor])[node]
    node_insts.Raise()

    if instance.name not in node_insts.data:
      raise errors.OpExecError("Instance %s is not running." % instance.name)

    logging.debug("Connecting to console of %s on %s", instance.name, node)

    hyper = hypervisor.GetHypervisor(instance.hypervisor)
    console_cmd = hyper.GetShellCommandForConsole(instance)

    # build ssh cmdline
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)


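# Added usage sketch for LUConnectConsole.Exec above (not part of the
# original code): the caller receives an argv-style SSH command line and is
# expected to exec it itself; assuming a Xen instance, the hypervisor-built
# console_cmd could look like "xm console inst1.example.com", so the full
# command would resemble
#   ssh -t root@node1.example.com 'xm console inst1.example.com'
# where the exact flags come from ssh.SshRunner.BuildCmd and all names here
# are made-up examples.

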
class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mode", "disks"]
  REQ_BGL = False

  def CheckArguments(self):
    if not hasattr(self.op, "remote_node"):
      self.op.remote_node = None
    if not hasattr(self.op, "iallocator"):
      self.op.iallocator = None

    # check for valid parameter combination
    cnt = [self.op.remote_node, self.op.iallocator].count(None)
    if self.op.mode == constants.REPLACE_DISK_CHG:
      if cnt == 2:
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given")
      elif cnt == 0:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
    else: # not replacing the secondary
      if cnt != 2:
        raise errors.OpPrereqError("The iallocator and new node options can"
                                   " be used only when changing the"
                                   " secondary node")

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if self.op.iallocator is not None:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    elif self.op.remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.op.remote_node = remote_node
      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # If we're not already locking all nodes in the set we have to declare the
    # instance's primary/secondary nodes.
    if (level == locking.LEVEL_NODE and
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
      self._LockInstancesNodes()

  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    """
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    self.LogInfo("Selected new secondary for the instance: %s",
                 self.op.remote_node)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    if self.op.remote_node is not None:
      nl.append(self.op.remote_node)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                 " instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    if self.op.iallocator is not None:
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance.")

    if self.op.mode == constants.REPLACE_DISK_PRI:
      n1 = self.tgt_node = instance.primary_node
      n2 = self.oth_node = self.sec_node
    elif self.op.mode == constants.REPLACE_DISK_SEC:
      n1 = self.tgt_node = self.sec_node
      n2 = self.oth_node = instance.primary_node
    elif self.op.mode == constants.REPLACE_DISK_CHG:
      n1 = self.new_node = remote_node
      n2 = self.oth_node = instance.primary_node
      self.tgt_node = self.sec_node
    else:
      raise errors.ProgrammerError("Unhandled disk replace mode")

    _CheckNodeOnline(self, n1)
    _CheckNodeOnline(self, n2)

    if not self.op.disks:
      self.op.disks = range(len(instance.disks))

    for disk_idx in self.op.disks:
      instance.FindDisk(disk_idx)

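  # Added note (not in the original code): the mode chosen above maps onto
  # the helper attributes as
  #   REPLACE_DISK_PRI -> tgt_node = primary,   oth_node = secondary
  #   REPLACE_DISK_SEC -> tgt_node = secondary, oth_node = primary
  #   REPLACE_DISK_CHG -> new_node = remote,    oth_node = primary,
  #                       tgt_node = secondary
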
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for drbd8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaced.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results[node]
      if res.failed or not res.data or my_vg not in res.data:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking disk/%d on %s" % (idx, node))
        cfg.SetDiskID(dev, node)
        if not self.rpc.call_blockdev_find(node, dev):
          raise errors.OpExecError("Can't find disk/%d on node %s" %
                                   (idx, node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, oth_node))
      if not _CheckDiskConsistency(self, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".disk%d_%s" % (idx, suf)
                  for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(self, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        _CreateBlockDev(self, tgt_node, instance, new_lv, True,
                        _GetInstanceInfoText(instance), False)

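    # Added explanatory note (not in the original code): the double rename
    # below first moves the old LVs out of the way (physical name plus a
    # "_replaced-<time_t>" suffix), then gives the new LVs the old physical
    # names, so that when they are attached back the DRBD device sees its
    # children under the exact names recorded in the configuration.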
    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        find_res = self.rpc.call_blockdev_find(tgt_node, to_ren)
        if not find_res.failed and find_res.data is not None: # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
      if result.failed or not result.data:
        for new_lv in new_lvs:
          result = self.rpc.call_blockdev_remove(tgt_node, new_lv)
          if result.failed or not result.data:
            warning("Can't rollback device %s", new_lv,
                    hint="manually cleanup unused logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      result = self.rpc.call_blockdev_find(instance.primary_node, dev)
      if result.failed or result.data[5]:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        result = self.rpc.call_blockdev_remove(tgt_node, lv)
        if result.failed or not result.data:
          warning("Can't remove old LV", hint="manually remove unused LVs")
          continue

  def _ExecD8Secondary(self, feedback_fn):
    """Replace the secondary node for drbd8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with the same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    # start of work
    cfg = self.cfg
    old_node = self.tgt_node
    new_node = self.new_node
    pri_node = instance.primary_node
    nodes_ip = {
      old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
      new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
      pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
      }

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([pri_node, new_node])
    for node in pri_node, new_node:
      res = results[node]
      if res.failed or not res.data or my_vg not in res.data:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d on %s" % (idx, pri_node))
      cfg.SetDiskID(dev, pri_node)
      result = self.rpc.call_blockdev_find(pri_node, dev)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't find disk/%d on node %s" %
                                 (idx, pri_node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, pri_node))
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
                                 " unsafe to replace the secondary" %
                                 pri_node)

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      info("adding new local storage on %s for disk/%d" %
           (new_node, idx))
      # we pass force_create=True to force LVM creation
      for new_lv in dev.children:
        _CreateBlockDev(self, new_node, instance, new_lv, True,
                        _GetInstanceInfoText(instance), False)

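    # Added explanatory note (not in the original code): each disk gets two
    # candidate logical_ids below: new_alone_id carries no port, so the DRBD
    # device is first brought up on the new node without networking, while
    # new_net_id keeps the original port for the later network attach from
    # the primary.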
    # Step 4: drbd minors and drbd setup changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
                                   instance.name)
    logging.debug("Allocated minors %s" % (minors,))
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
      size = dev.size
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the later activation in step 4
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if pri_node == o_node1:
        p_minor = o_minor1
      else:
        p_minor = o_minor2

      new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
      new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)

      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children)
      try:
        _CreateSingleBlockDev(self, new_node, instance, new_drbd,
                              _GetInstanceInfoText(instance), False)
      except errors.BlockDeviceError:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    for idx, dev in enumerate(instance.disks):
      # we have new devices, shutdown the drbd on the old secondary
      info("shutting down drbd for disk/%d on old node" % idx)
      cfg.SetDiskID(dev, old_node)
      result = self.rpc.call_blockdev_shutdown(old_node, dev)
      if result.failed or not result.data:
        warning("Failed to shutdown drbd for disk/%d on old node" % idx,
                hint="Please cleanup this device manually as soon as possible")

    info("detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
                                               instance.disks)[pri_node]

    msg = result.RemoteFailMsg()
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(instance.name)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    info("updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      cfg.SetDiskID(dev, pri_node)
    cfg.Update(instance)

    # and now perform the drbd attach
    info("attaching primary drbds to new secondary (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
                                           instance.disks, instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.RemoteFailMsg()
      if msg:
        warning("can't attach drbd disks on node %s: %s", to_node, msg,
                hint="please do a gnt-instance info to see the"
                " status of disks")

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
      cfg.SetDiskID(dev, pri_node)
      result = self.rpc.call_blockdev_find(pri_node, dev)
      result.Raise()
      if result.data[5]:
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)

    self.proc.LogStep(6, steps_total, "removing old storage")
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
      info("remove logical volumes for disk/%d" % idx)
      for lv in old_lvs:
        cfg.SetDiskID(lv, old_node)
        result = self.rpc.call_blockdev_remove(old_node, lv)
        if result.failed or not result.data:
          warning("Can't remove LV on old secondary",
                  hint="Cleanup stale volumes by hand")

  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    instance = self.instance

    # Activate the instance disks if we're replacing them on a down instance
    if not instance.admin_up:
      _StartInstanceDisks(self, instance, True)

    if self.op.mode == constants.REPLACE_DISK_CHG:
      fn = self._ExecD8Secondary
    else:
      fn = self._ExecD8DiskOnly

    ret = fn(feedback_fn)

    # Deactivate the instance disks if we're replacing them on a down instance
    if not instance.admin_up:
      _SafeShutdownInstanceDisks(self, instance)

    return ret


class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)


    self.instance = instance

    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    self.disk = instance.FindDisk(self.op.disk)

    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       instance.hypervisor)
    for node in nodenames:
      info = nodeinfo[node]
      if info.failed or not info.data:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      vg_free = info.data.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > vg_free:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, vg_free, self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = self.disk
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      result.Raise()
      if (not result.data or not isinstance(result.data, (list, tuple)) or
          len(result.data) != 2):
        raise errors.OpExecError("Grow request failed to node %s" % node)
      elif not result.data[0]:
        raise errors.OpExecError("Grow request failed to node %s: %s" %
                                 (node, result.data[1]))
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance)
      if disk_abort:
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")


class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    """
    static = self.op.static
    if not static:
      self.cfg.SetDiskID(dev, instance.primary_node)
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
      dev_pstatus.Raise()
      dev_pstatus = dev_pstatus.data
    else:
      dev_pstatus = None

    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode and not static:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
      dev_sstatus.Raise()
      dev_sstatus = dev_sstatus.data
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise()
        remote_info = remote_info.data
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        }

      result[instance.name] = idict

    return result


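# Added illustrative note (not part of the original code): the result of
# LUQueryInstanceData.Exec above is a dict keyed by instance name, roughly
#   {"inst1.example.com": {"name": "inst1.example.com", "config_state": "up",
#                          "run_state": "down", "disks": [...], ...}}
# with the instance name here being a made-up sample.

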
class LUSetInstanceParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def CheckArguments(self):
    if not hasattr(self.op, 'nics'):
      self.op.nics = []
    if not hasattr(self.op, 'disks'):
      self.op.disks = []
    if not hasattr(self.op, 'beparams'):
      self.op.beparams = {}
    if not hasattr(self.op, 'hvparams'):
      self.op.hvparams = {}
    self.op.force = getattr(self.op, "force", False)
    if not (self.op.nics or self.op.disks or
            self.op.hvparams or self.op.beparams):
      raise errors.OpPrereqError("No changes submitted")

    utils.CheckBEParams(self.op.beparams)

    # Disk validation
    disk_addremove = 0
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        disk_addremove += 1
        continue
      elif disk_op == constants.DDM_ADD:
        disk_addremove += 1
      else:
        if not isinstance(disk_op, int):
          raise errors.OpPrereqError("Invalid disk index")
      if disk_op == constants.DDM_ADD:
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
        if mode not in (constants.DISK_RDONLY, constants.DISK_RDWR):
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
        size = disk_dict.get('size', None)
        if size is None:
          raise errors.OpPrereqError("Required disk parameter size missing")
        try:
          size = int(size)
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
                                     str(err))
        disk_dict['size'] = size
      else:
        # modification of disk
        if 'size' in disk_dict:
          raise errors.OpPrereqError("Disk size change not possible, use"
                                     " grow-disk")

    if disk_addremove > 1:
      raise errors.OpPrereqError("Only one disk add or remove operation"
                                 " supported at a time")

    # NIC validation
    nic_addremove = 0
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        nic_addremove += 1
        continue
      elif nic_op == constants.DDM_ADD:
        nic_addremove += 1
      else:
        if not isinstance(nic_op, int):
          raise errors.OpPrereqError("Invalid nic index")

      # nic_dict should be a dict
      nic_ip = nic_dict.get('ip', None)
      if nic_ip is not None:
        if nic_ip.lower() == "none":
          nic_dict['ip'] = None
        else:
          if not utils.IsValidIP(nic_ip):
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
      # we can only check None bridges and assign the default one
      nic_bridge = nic_dict.get('bridge', None)
      if nic_bridge is None:
        nic_dict['bridge'] = self.cfg.GetDefBridge()
      # but we can validate MACs
      nic_mac = nic_dict.get('mac', None)
      if nic_mac is not None:
        if self.cfg.IsMacInUse(nic_mac):
          raise errors.OpPrereqError("MAC address %s already in use"
                                     " in cluster" % nic_mac)
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          if not utils.IsValidMac(nic_mac):
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
    if nic_addremove > 1:
      raise errors.OpPrereqError("Only one NIC add or remove operation"
                                 " supported at a time")

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = dict()
    if constants.BE_MEMORY in self.be_new:
      args['memory'] = self.be_new[constants.BE_MEMORY]
    if constants.BE_VCPUS in self.be_new:
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
    # FIXME: re-add disk/nic changes
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

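  # BuildHooksEnv above returns the (env, pre-hook nodes, post-hook nodes)
  # triple expected from hook builders; here both hook phases run on the
  # master plus all of the instance's nodes.
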
  def CheckPrereq(self):
    """Check prerequisites.

    This checks the requested parameter changes against the instance
    configuration and the cluster state.

    """
    force = self.force = self.op.force

    # checking the new params on the primary/secondary nodes

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    pnode = instance.primary_node
    nodelist = list(instance.all_nodes)

    # hvparams processing
    if self.op.hvparams:
      i_hvdict = copy.deepcopy(instance.hvparams)
      for key, val in self.op.hvparams.iteritems():
        if val == constants.VALUE_DEFAULT:
          try:
            del i_hvdict[key]
          except KeyError:
            pass
        elif val == constants.VALUE_NONE:
          i_hvdict[key] = None
        else:
          i_hvdict[key] = val
      cluster = self.cfg.GetClusterInfo()
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
                                i_hvdict)
      # local check
      hypervisor.GetHypervisor(
        instance.hypervisor).CheckParameterSyntax(hv_new)
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = copy.deepcopy(instance.beparams)
      for key, val in self.op.beparams.iteritems():
        if val == constants.VALUE_DEFAULT:
          try:
            del i_bedict[key]
          except KeyError:
            pass
        else:
          i_bedict[key] = val
      cluster = self.cfg.GetClusterInfo()
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                i_bedict)
      self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}

    self.warn = []

    if constants.BE_MEMORY in self.op.beparams and not self.force:
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
                                         instance.hypervisor)
      if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict):
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s" % pnode)
      else:
        if not instance_info.failed and instance_info.data:
          current_mem = instance_info.data['memory']
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
                    nodeinfo[pnode].data['memory_free'])
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.iteritems():
          if node not in instance.secondary_nodes:
            continue
          if nres.failed or not isinstance(nres.data, dict):
            self.warn.append("Can't get info from secondary node %s" % node)
          elif be_new[constants.BE_MEMORY] > nres.data['memory_free']:
            self.warn.append("Not enough memory to failover instance to"
                             " secondary node %s" % node)

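    # To illustrate the memory shortfall arithmetic earlier in this method
    # with made-up numbers: growing an instance from 512 MB to 2048 MB on a
    # primary node with 1024 MB free yields
    # miss_mem = 2048 - 512 - 1024 = 512 > 0, so the change is refused
    # unless the force flag is set.
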
    # NIC processing
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        if not instance.nics:
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
        continue
      if nic_op != constants.DDM_ADD:
        # an existing nic
        if nic_op < 0 or nic_op >= len(instance.nics):
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
                                     " are 0 to %d" %
                                     (nic_op, len(instance.nics) - 1))
      nic_bridge = nic_dict.get('bridge', None)
      if nic_bridge is not None:
        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
          msg = ("Bridge '%s' doesn't exist on one of"
                 " the instance nodes" % nic_bridge)
          if self.force:
            self.warn.append(msg)
          else:
            raise errors.OpPrereqError(msg)

    # DISK processing
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances")
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        if len(instance.disks) == 1:
          raise errors.OpPrereqError("Cannot remove the last disk of"
                                     " an instance")
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
        ins_l = ins_l[pnode]
        if ins_l.failed or not isinstance(ins_l.data, list):
          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
        if instance.name in ins_l.data:
          raise errors.OpPrereqError("Instance is running, can't remove"
                                     " disks")

      if (disk_op == constants.DDM_ADD and
          len(instance.disks) >= constants.MAX_DISKS):
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
                                   " add more" % constants.MAX_DISKS)
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
        # an existing disk
        if disk_op < 0 or disk_op >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
                                     " are 0 to %d" %
                                     (disk_op, len(instance.disks) - 1))

    return

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

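    # "result" below collects (parameter, new value) pairs describing each
    # applied change, e.g. ("disk/1", "add:size=1024,mode=rw") or
    # ("be/memory", 512) (values illustrative); the caller reports these
    # back to the user.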
    result = []
    instance = self.instance
    # disk changes
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        # remove the last disk
        device = instance.disks.pop()
        device_idx = len(instance.disks)
        for node, disk in device.ComputeNodeTree(instance.primary_node):
          self.cfg.SetDiskID(disk, node)
          rpc_result = self.rpc.call_blockdev_remove(node, disk)
          if rpc_result.failed or not rpc_result.data:
            self.proc.LogWarning("Could not remove disk/%d on node %s,"
                                 " continuing anyway", device_idx, node)
        result.append(("disk/%d" % device_idx, "remove"))
      elif disk_op == constants.DDM_ADD:
        # add a new disk
        if instance.disk_template == constants.DT_FILE:
          file_driver, file_path = instance.disks[0].logical_id
          file_path = os.path.dirname(file_path)
        else:
          file_driver = file_path = None
        disk_idx_base = len(instance.disks)
        new_disk = _GenerateDiskTemplate(self,
                                         instance.disk_template,
                                         instance.name, instance.primary_node,
                                         instance.secondary_nodes,
                                         [disk_dict],
                                         file_path,
                                         file_driver,
                                         disk_idx_base)[0]
        new_disk.mode = disk_dict['mode']
        instance.disks.append(new_disk)
        info = _GetInstanceInfoText(instance)

        logging.info("Creating volume %s for instance %s",
                     new_disk.iv_name, instance.name)
        # Note: this needs to be kept in sync with _CreateDisks
        #HARDCODE
        for node in instance.all_nodes:
          f_create = node == instance.primary_node
          try:
            _CreateBlockDev(self, node, instance, new_disk,
                            f_create, info, f_create)
          except errors.OpExecError, err:
            self.LogWarning("Failed to create volume %s (%s) on"
                            " node %s: %s",
                            new_disk.iv_name, new_disk, node, err)
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
                       (new_disk.size, new_disk.mode)))
      else:
        # change a given disk
        instance.disks[disk_op].mode = disk_dict['mode']
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
    # NIC changes
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        # remove the last nic
        del instance.nics[-1]
        result.append(("nic.%d" % len(instance.nics), "remove"))
      elif nic_op == constants.DDM_ADD:
        # add a new nic
        if 'mac' not in nic_dict:
          mac = constants.VALUE_GENERATE
        else:
          mac = nic_dict['mac']
        if mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          mac = self.cfg.GenerateMAC()
        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
                              bridge=nic_dict.get('bridge', None))
        instance.nics.append(new_nic)
        result.append(("nic.%d" % (len(instance.nics) - 1),
                       "add:mac=%s,ip=%s,bridge=%s" %
                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
      else:
        # change a given nic
        for key in 'mac', 'ip', 'bridge':
          if key in nic_dict:
            setattr(instance.nics[nic_op], key, nic_dict[key])
            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_new
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    self.cfg.Update(instance)

    return result


class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node.

    """
    rpcresult = self.rpc.call_export_list(self.nodes)
    result = {}
    for node in rpcresult:
      if rpcresult[node].failed:
        result[node] = False
      else:
        result[node] = rpcresult[node].data

    return result


class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have to lock all nodes, as we don't know
    # where the previous export might be, and in this LU we search for it
    # and remove it from its current node. In the future we could fix this
    # by:
    #  - making a tasklet to search (share-lock all), then create the new
    #    one, then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    if self.dst_node is None:
      # This is a wrong node name, not a non-locked node
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
    _CheckNodeOnline(self, self.dst_node.name)

    # instance disk type verification
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

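  # Exec below implements the export pipeline: optionally shut the instance
  # down, snapshot every disk, restart the instance, copy the snapshots to
  # the target node, drop the snapshots on the source node, finalize the
  # export on the target and prune older exports of the same instance.
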
  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      result = self.rpc.call_instance_shutdown(src_node, instance)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, src_node))

    vgname = self.cfg.GetVGName()

    snap_disks = []

    # set the disks ID correctly since call_instance_start needs the
    # correct drbd minor to create the symlinks
    for disk in instance.disks:
      self.cfg.SetDiskID(disk, src_node)

    try:
      for disk in instance.disks:
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
        if new_dev_name.failed or not new_dev_name.data:
          self.LogWarning("Could not snapshot block device %s on node %s",
                          disk.logical_id[1], src_node)
          snap_disks.append(False)
        else:
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                 logical_id=(vgname, new_dev_name.data),
                                 physical_id=(vgname, new_dev_name.data),
                                 iv_name=disk.iv_name)
          snap_disks.append(new_dev)

    finally:
      if self.op.shutdown and instance.admin_up:
        result = self.rpc.call_instance_start(src_node, instance, None)
        msg = result.RemoteFailMsg()
        if msg:
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance: %s" % msg)

    # TODO: check for size

    cluster_name = self.cfg.GetClusterName()
    for idx, dev in enumerate(snap_disks):
      if dev:
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                               instance, cluster_name, idx)
        if result.failed or not result.data:
          self.LogWarning("Could not export block device %s from node %s to"
                          " node %s", dev.logical_id[1], src_node,
                          dst_node.name)
        result = self.rpc.call_blockdev_remove(src_node, dev)
        if result.failed or not result.data:
          self.LogWarning("Could not remove snapshot block device %s from node"
                          " %s", dev.logical_id[1], src_node)

    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
    if result.failed or not result.data:
      self.LogWarning("Could not finalize export for instance %s on node %s",
                      instance.name, dst_node.name)

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # On one-node clusters nodelist will be empty after the removal; if we
    # proceeded, the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].failed:
          continue
        if instance.name in exportlist[node].data:
          if not self.rpc.call_export_remove(node, instance.name):
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s", instance.name, node)


class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # We need all nodes to be locked in order for RemoveExport to work, but
    # we don't need to lock the instance itself, as nothing will happen to
    # it (and we can remove exports also for a removed instance)
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed
    # in. This will only work if it was an FQDN, though.
    fqdn_warn = False
    if not instance_name:
      fqdn_warn = True
      instance_name = self.op.instance_name

    exportlist = self.rpc.call_export_list(self.acquired_locks[
      locking.LEVEL_NODE])
    found = False
    for node in exportlist:
      if exportlist[node].failed:
        self.LogWarning("Failed to query node %s, continuing" % node)
        continue
      if instance_name in exportlist[node].data:
        found = True
        result = self.rpc.call_export_remove(node, instance_name)
        if result.failed or not result.data:
          logging.error("Could not remove export for instance %s"
                        " on node %s", instance_name, node)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")


class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    self.needed_locks = {}
    if self.op.kind == constants.TAG_NODE:
      name = self.cfg.ExpandNodeName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.needed_locks[locking.LEVEL_NODE] = name
    elif self.op.kind == constants.TAG_INSTANCE:
      name = self.cfg.ExpandInstanceName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.needed_locks[locking.LEVEL_INSTANCE] = name

  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif self.op.kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif self.op.kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))


class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    return list(self.target.GetTags())


class LUSearchTags(NoHooksLU):
  """Searches the tags for a given pattern.

  """
  _OP_REQP = ["pattern"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the pattern passed for validity by compiling it.

    """
    try:
      self.re = re.compile(self.op.pattern)
    except re.error, err:
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
                                 (self.op.pattern, err))

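  # As a hypothetical example, a pattern such as r"^staging" would match
  # the tags "staging" and "staging-web" on any cluster, node or instance
  # object scanned by Exec below, which returns (path, tag) pairs like
  # ("/instances/inst1.example.com", "staging").
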
  def Exec(self, feedback_fn):
    """Returns the (path, tag) pairs matching the pattern.

    """
    cfg = self.cfg
    tgts = [("/cluster", cfg.GetClusterInfo())]
    ilist = cfg.GetAllInstancesInfo().values()
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
    nlist = cfg.GetAllNodesInfo().values()
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
    results = []
    for path, target in tgts:
      for tag in target.GetTags():
        if self.re.search(tag):
          results.append((path, tag))
    return results


class LUAddTags(TagsLU):
  """Sets a tag on a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the type and length of the tag name and value.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

  def Exec(self, feedback_fn):
    """Sets the tag.

    """
    try:
      for tag in self.op.tags:
        self.target.AddTag(tag)
    except errors.TagError, err:
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")


class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    if not del_tags <= cur_tags:
      diff_tags = del_tags - cur_tags
      diff_names = ["'%s'" % tag for tag in diff_tags]
      diff_names.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")


class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not result:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, node_result in result.items():
        node_result.Raise()
        if not node_result.data:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, node_result.data))


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has four sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the mode's _ALLO_KEYS/_RELO_KEYS class
      attribute are required)
    - four buffer attributes (in_text/out_text, in_data/out_data), that
      represent the input (to the external script) in text and data
      structure format, and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  _RELO_KEYS = [
    "relocate_from",
    ]

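  # Exactly one of the two key lists above applies, depending on the mode;
  # __init__ below enforces that every name in the applicable list is
  # passed as a keyword argument.
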
  def __init__(self, lu, mode, name, **kwargs):
    self.lu = lu
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.lu.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": 1,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor

    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
                                           hypervisor_name)
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
                       cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "master_candidate": ninfo.master_candidate,
        }

      if not ninfo.offline:
        nresult.Raise()
        if not isinstance(nresult.data, dict):
          raise errors.OpExecError("Can't get data for node %s" % nname)
        remote_info = nresult.data
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          try:
            remote_info[attr] = int(remote_info[attr])
          except ValueError, err:
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" % (nname, attr, err))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].data:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].data[iinfo.name]['memory'])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
                  for n in iinfo.nics]
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

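  # The resulting self.in_data is a plain dict, in outline (abbreviated):
  #   {"version": 1, "cluster_name": ..., "cluster_tags": [...],
  #    "enabled_hypervisors": [...], "nodes": {name: {...}},
  #    "instances": {name: {...}}}
  # The two methods below then attach the mode-specific "request" key.
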
  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node")

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

  def _BuildInputData(self):
    """Build input data structures.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner
    data = self.in_text

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
    result.Raise()

    if not isinstance(result.data, (list, tuple)) or len(result.data) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result.data

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()

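  # Note: the runner result unpacked above is a 4-tuple of (status code,
  # stdout, stderr, failure reason); stdout is expected to carry the
  # allocator's JSON answer, which _ValidateResult below parses and
  # sanity-checks.
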
  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict

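  # A well-formed allocator reply, as accepted by _ValidateResult above,
  # could look like this (values illustrative):
  #   {"success": true, "info": "allocation successful",
  #    "nodes": ["node1.example.com", "node2.example.com"]}
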
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode
    of the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result