Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ ae5849b5

History | View | Annotate | Download (188.5 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module implementing the master-side code."""
23

    
24
# pylint: disable-msg=W0613,W0201
25

    
26
import os
27
import os.path
28
import sha
29
import time
30
import tempfile
31
import re
32
import platform
33
import logging
34

    
35
from ganeti import rpc
36
from ganeti import ssh
37
from ganeti import logger
38
from ganeti import utils
39
from ganeti import errors
40
from ganeti import hypervisor
41
from ganeti import locking
42
from ganeti import constants
43
from ganeti import objects
44
from ganeti import opcodes
45
from ganeti import serializer
46

    
47

    
48
class LogicalUnit(object):
49
  """Logical Unit base class.
50

51
  Subclasses must follow these rules:
52
    - implement ExpandNames
53
    - implement CheckPrereq
54
    - implement Exec
55
    - implement BuildHooksEnv
56
    - redefine HPATH and HTYPE
57
    - optionally redefine their run requirements:
58
        REQ_MASTER: the LU needs to run on the master node
59
        REQ_WSSTORE: the LU needs a writable SimpleStore
60
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
61

62
  Note that all commands require root permissions.
63

64
  """
65
  HPATH = None
66
  HTYPE = None
67
  _OP_REQP = []
68
  REQ_MASTER = True
69
  REQ_WSSTORE = False
70
  REQ_BGL = True
71

    
72
  def __init__(self, processor, op, context, sstore):
73
    """Constructor for LogicalUnit.
74

75
    This needs to be overriden in derived classes in order to check op
76
    validity.
77

78
    """
79
    self.proc = processor
80
    self.op = op
81
    self.cfg = context.cfg
82
    self.sstore = sstore
83
    self.context = context
84
    # Dicts used to declare locking needs to mcpu
85
    self.needed_locks = None
86
    self.acquired_locks = {}
87
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
88
    self.add_locks = {}
89
    self.remove_locks = {}
90
    # Used to force good behavior when calling helper functions
91
    self.recalculate_locks = {}
92
    self.__ssh = None
93

    
94
    for attr_name in self._OP_REQP:
95
      attr_val = getattr(op, attr_name, None)
96
      if attr_val is None:
97
        raise errors.OpPrereqError("Required parameter '%s' missing" %
98
                                   attr_name)
99

    
100
    if not self.cfg.IsCluster():
101
      raise errors.OpPrereqError("Cluster not initialized yet,"
102
                                 " use 'gnt-cluster init' first.")
103
    if self.REQ_MASTER:
104
      master = sstore.GetMasterNode()
105
      if master != utils.HostInfo().name:
106
        raise errors.OpPrereqError("Commands must be run on the master"
107
                                   " node %s" % master)
108

    
109
  def __GetSSH(self):
110
    """Returns the SshRunner object
111

112
    """
113
    if not self.__ssh:
114
      self.__ssh = ssh.SshRunner(self.sstore)
115
    return self.__ssh
116

    
117
  ssh = property(fget=__GetSSH)
118

    
119
  def ExpandNames(self):
120
    """Expand names for this LU.
121

122
    This method is called before starting to execute the opcode, and it should
123
    update all the parameters of the opcode to their canonical form (e.g. a
124
    short node name must be fully expanded after this method has successfully
125
    completed). This way locking, hooks, logging, ecc. can work correctly.
126

127
    LUs which implement this method must also populate the self.needed_locks
128
    member, as a dict with lock levels as keys, and a list of needed lock names
129
    as values. Rules:
130
      - Use an empty dict if you don't need any lock
131
      - If you don't need any lock at a particular level omit that level
132
      - Don't put anything for the BGL level
133
      - If you want all locks at a level use locking.ALL_SET as a value
134

135
    If you need to share locks (rather than acquire them exclusively) at one
136
    level you can modify self.share_locks, setting a true value (usually 1) for
137
    that level. By default locks are not shared.
138

139
    Examples:
140
    # Acquire all nodes and one instance
141
    self.needed_locks = {
142
      locking.LEVEL_NODE: locking.ALL_SET,
143
      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
144
    }
145
    # Acquire just two nodes
146
    self.needed_locks = {
147
      locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
148
    }
149
    # Acquire no locks
150
    self.needed_locks = {} # No, you can't leave it to the default value None
151

152
    """
153
    # The implementation of this method is mandatory only if the new LU is
154
    # concurrent, so that old LUs don't need to be changed all at the same
155
    # time.
156
    if self.REQ_BGL:
157
      self.needed_locks = {} # Exclusive LUs don't need locks.
158
    else:
159
      raise NotImplementedError
160

    
161
  def DeclareLocks(self, level):
162
    """Declare LU locking needs for a level
163

164
    While most LUs can just declare their locking needs at ExpandNames time,
165
    sometimes there's the need to calculate some locks after having acquired
166
    the ones before. This function is called just before acquiring locks at a
167
    particular level, but after acquiring the ones at lower levels, and permits
168
    such calculations. It can be used to modify self.needed_locks, and by
169
    default it does nothing.
170

171
    This function is only called if you have something already set in
172
    self.needed_locks for the level.
173

174
    @param level: Locking level which is going to be locked
175
    @type level: member of ganeti.locking.LEVELS
176

177
    """
178

    
179
  def CheckPrereq(self):
180
    """Check prerequisites for this LU.
181

182
    This method should check that the prerequisites for the execution
183
    of this LU are fulfilled. It can do internode communication, but
184
    it should be idempotent - no cluster or system changes are
185
    allowed.
186

187
    The method should raise errors.OpPrereqError in case something is
188
    not fulfilled. Its return value is ignored.
189

190
    This method should also update all the parameters of the opcode to
191
    their canonical form if it hasn't been done by ExpandNames before.
192

193
    """
194
    raise NotImplementedError
195

    
196
  def Exec(self, feedback_fn):
197
    """Execute the LU.
198

199
    This method should implement the actual work. It should raise
200
    errors.OpExecError for failures that are somewhat dealt with in
201
    code, or expected.
202

203
    """
204
    raise NotImplementedError
205

    
206
  def BuildHooksEnv(self):
207
    """Build hooks environment for this LU.
208

209
    This method should return a three-node tuple consisting of: a dict
210
    containing the environment that will be used for running the
211
    specific hook for this LU, a list of node names on which the hook
212
    should run before the execution, and a list of node names on which
213
    the hook should run after the execution.
214

215
    The keys of the dict must not have 'GANETI_' prefixed as this will
216
    be handled in the hooks runner. Also note additional keys will be
217
    added by the hooks runner. If the LU doesn't define any
218
    environment, an empty dict (and not None) should be returned.
219

220
    No nodes should be returned as an empty list (and not None).
221

222
    Note that if the HPATH for a LU class is None, this function will
223
    not be called.
224

225
    """
226
    raise NotImplementedError
227

    
228
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
229
    """Notify the LU about the results of its hooks.
230

231
    This method is called every time a hooks phase is executed, and notifies
232
    the Logical Unit about the hooks' result. The LU can then use it to alter
233
    its result based on the hooks.  By default the method does nothing and the
234
    previous result is passed back unchanged but any LU can define it if it
235
    wants to use the local cluster hook-scripts somehow.
236

237
    Args:
238
      phase: the hooks phase that has just been run
239
      hooks_results: the results of the multi-node hooks rpc call
240
      feedback_fn: function to send feedback back to the caller
241
      lu_result: the previous result this LU had, or None in the PRE phase.
242

243
    """
244
    return lu_result
245

    
246
  def _ExpandAndLockInstance(self):
247
    """Helper function to expand and lock an instance.
248

249
    Many LUs that work on an instance take its name in self.op.instance_name
250
    and need to expand it and then declare the expanded name for locking. This
251
    function does it, and then updates self.op.instance_name to the expanded
252
    name. It also initializes needed_locks as a dict, if this hasn't been done
253
    before.
254

255
    """
256
    if self.needed_locks is None:
257
      self.needed_locks = {}
258
    else:
259
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
260
        "_ExpandAndLockInstance called with instance-level locks set"
261
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
262
    if expanded_name is None:
263
      raise errors.OpPrereqError("Instance '%s' not known" %
264
                                  self.op.instance_name)
265
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
266
    self.op.instance_name = expanded_name
267

    
268
  def _LockInstancesNodes(self, primary_only=False):
269
    """Helper function to declare instances' nodes for locking.
270

271
    This function should be called after locking one or more instances to lock
272
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
273
    with all primary or secondary nodes for instances already locked and
274
    present in self.needed_locks[locking.LEVEL_INSTANCE].
275

276
    It should be called from DeclareLocks, and for safety only works if
277
    self.recalculate_locks[locking.LEVEL_NODE] is set.
278

279
    In the future it may grow parameters to just lock some instance's nodes, or
280
    to just lock primaries or secondary nodes, if needed.
281

282
    If should be called in DeclareLocks in a way similar to:
283

284
    if level == locking.LEVEL_NODE:
285
      self._LockInstancesNodes()
286

287
    @type primary_only: boolean
288
    @param primary_only: only lock primary nodes of locked instances
289

290
    """
291
    assert locking.LEVEL_NODE in self.recalculate_locks, \
292
      "_LockInstancesNodes helper function called with no nodes to recalculate"
293

    
294
    # TODO: check if we're really been called with the instance locks held
295

    
296
    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
297
    # future we might want to have different behaviors depending on the value
298
    # of self.recalculate_locks[locking.LEVEL_NODE]
299
    wanted_nodes = []
300
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
301
      instance = self.context.cfg.GetInstanceInfo(instance_name)
302
      wanted_nodes.append(instance.primary_node)
303
      if not primary_only:
304
        wanted_nodes.extend(instance.secondary_nodes)
305

    
306
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
307
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
308
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
309
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
310

    
311
    del self.recalculate_locks[locking.LEVEL_NODE]
312

    
313

    
314
class NoHooksLU(LogicalUnit):
315
  """Simple LU which runs no hooks.
316

317
  This LU is intended as a parent for other LogicalUnits which will
318
  run no hooks, in order to reduce duplicate code.
319

320
  """
321
  HPATH = None
322
  HTYPE = None
323

    
324

    
325
def _GetWantedNodes(lu, nodes):
326
  """Returns list of checked and expanded node names.
327

328
  Args:
329
    nodes: List of nodes (strings) or None for all
330

331
  """
332
  if not isinstance(nodes, list):
333
    raise errors.OpPrereqError("Invalid argument type 'nodes'")
334

    
335
  if not nodes:
336
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
337
      " non-empty list of nodes whose name is to be expanded.")
338

    
339
  wanted = []
340
  for name in nodes:
341
    node = lu.cfg.ExpandNodeName(name)
342
    if node is None:
343
      raise errors.OpPrereqError("No such node name '%s'" % name)
344
    wanted.append(node)
345

    
346
  return utils.NiceSort(wanted)
347

    
348

    
349
def _GetWantedInstances(lu, instances):
350
  """Returns list of checked and expanded instance names.
351

352
  Args:
353
    instances: List of instances (strings) or None for all
354

355
  """
356
  if not isinstance(instances, list):
357
    raise errors.OpPrereqError("Invalid argument type 'instances'")
358

    
359
  if instances:
360
    wanted = []
361

    
362
    for name in instances:
363
      instance = lu.cfg.ExpandInstanceName(name)
364
      if instance is None:
365
        raise errors.OpPrereqError("No such instance name '%s'" % name)
366
      wanted.append(instance)
367

    
368
  else:
369
    wanted = lu.cfg.GetInstanceList()
370
  return utils.NiceSort(wanted)
371

    
372

    
373
def _CheckOutputFields(static, dynamic, selected):
374
  """Checks whether all selected fields are valid.
375

376
  Args:
377
    static: Static fields
378
    dynamic: Dynamic fields
379

380
  """
381
  static_fields = frozenset(static)
382
  dynamic_fields = frozenset(dynamic)
383

    
384
  all_fields = static_fields | dynamic_fields
385

    
386
  if not all_fields.issuperset(selected):
387
    raise errors.OpPrereqError("Unknown output fields selected: %s"
388
                               % ",".join(frozenset(selected).
389
                                          difference(all_fields)))
390

    
391

    
392
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
393
                          memory, vcpus, nics):
394
  """Builds instance related env variables for hooks from single variables.
395

396
  Args:
397
    secondary_nodes: List of secondary nodes as strings
398
  """
399
  env = {
400
    "OP_TARGET": name,
401
    "INSTANCE_NAME": name,
402
    "INSTANCE_PRIMARY": primary_node,
403
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
404
    "INSTANCE_OS_TYPE": os_type,
405
    "INSTANCE_STATUS": status,
406
    "INSTANCE_MEMORY": memory,
407
    "INSTANCE_VCPUS": vcpus,
408
  }
409

    
410
  if nics:
411
    nic_count = len(nics)
412
    for idx, (ip, bridge, mac) in enumerate(nics):
413
      if ip is None:
414
        ip = ""
415
      env["INSTANCE_NIC%d_IP" % idx] = ip
416
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
417
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
418
  else:
419
    nic_count = 0
420

    
421
  env["INSTANCE_NIC_COUNT"] = nic_count
422

    
423
  return env
424

    
425

    
426
def _BuildInstanceHookEnvByObject(instance, override=None):
427
  """Builds instance related env variables for hooks from an object.
428

429
  Args:
430
    instance: objects.Instance object of instance
431
    override: dict of values to override
432
  """
433
  args = {
434
    'name': instance.name,
435
    'primary_node': instance.primary_node,
436
    'secondary_nodes': instance.secondary_nodes,
437
    'os_type': instance.os,
438
    'status': instance.os,
439
    'memory': instance.memory,
440
    'vcpus': instance.vcpus,
441
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
442
  }
443
  if override:
444
    args.update(override)
445
  return _BuildInstanceHookEnv(**args)
446

    
447

    
448
def _CheckInstanceBridgesExist(instance):
449
  """Check that the brigdes needed by an instance exist.
450

451
  """
452
  # check bridges existance
453
  brlist = [nic.bridge for nic in instance.nics]
454
  if not rpc.call_bridges_exist(instance.primary_node, brlist):
455
    raise errors.OpPrereqError("one or more target bridges %s does not"
456
                               " exist on destination node '%s'" %
457
                               (brlist, instance.primary_node))
458

    
459

    
460
class LUDestroyCluster(NoHooksLU):
461
  """Logical unit for destroying the cluster.
462

463
  """
464
  _OP_REQP = []
465

    
466
  def CheckPrereq(self):
467
    """Check prerequisites.
468

469
    This checks whether the cluster is empty.
470

471
    Any errors are signalled by raising errors.OpPrereqError.
472

473
    """
474
    master = self.sstore.GetMasterNode()
475

    
476
    nodelist = self.cfg.GetNodeList()
477
    if len(nodelist) != 1 or nodelist[0] != master:
478
      raise errors.OpPrereqError("There are still %d node(s) in"
479
                                 " this cluster." % (len(nodelist) - 1))
480
    instancelist = self.cfg.GetInstanceList()
481
    if instancelist:
482
      raise errors.OpPrereqError("There are still %d instance(s) in"
483
                                 " this cluster." % len(instancelist))
484

    
485
  def Exec(self, feedback_fn):
486
    """Destroys the cluster.
487

488
    """
489
    master = self.sstore.GetMasterNode()
490
    if not rpc.call_node_stop_master(master, False):
491
      raise errors.OpExecError("Could not disable the master role")
492
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
493
    utils.CreateBackup(priv_key)
494
    utils.CreateBackup(pub_key)
495
    return master
496

    
497

    
498
class LUVerifyCluster(LogicalUnit):
499
  """Verifies the cluster status.
500

501
  """
502
  HPATH = "cluster-verify"
503
  HTYPE = constants.HTYPE_CLUSTER
504
  _OP_REQP = ["skip_checks"]
505
  REQ_BGL = False
506

    
507
  def ExpandNames(self):
508
    self.needed_locks = {
509
      locking.LEVEL_NODE: locking.ALL_SET,
510
      locking.LEVEL_INSTANCE: locking.ALL_SET,
511
    }
512
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
513

    
514
  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
515
                  remote_version, feedback_fn):
516
    """Run multiple tests against a node.
517

518
    Test list:
519
      - compares ganeti version
520
      - checks vg existance and size > 20G
521
      - checks config file checksum
522
      - checks ssh to other nodes
523

524
    Args:
525
      node: name of the node to check
526
      file_list: required list of files
527
      local_cksum: dictionary of local files and their checksums
528

529
    """
530
    # compares ganeti version
531
    local_version = constants.PROTOCOL_VERSION
532
    if not remote_version:
533
      feedback_fn("  - ERROR: connection to %s failed" % (node))
534
      return True
535

    
536
    if local_version != remote_version:
537
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
538
                      (local_version, node, remote_version))
539
      return True
540

    
541
    # checks vg existance and size > 20G
542

    
543
    bad = False
544
    if not vglist:
545
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
546
                      (node,))
547
      bad = True
548
    else:
549
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
550
                                            constants.MIN_VG_SIZE)
551
      if vgstatus:
552
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
553
        bad = True
554

    
555
    # checks config file checksum
556
    # checks ssh to any
557

    
558
    if 'filelist' not in node_result:
559
      bad = True
560
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
561
    else:
562
      remote_cksum = node_result['filelist']
563
      for file_name in file_list:
564
        if file_name not in remote_cksum:
565
          bad = True
566
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
567
        elif remote_cksum[file_name] != local_cksum[file_name]:
568
          bad = True
569
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
570

    
571
    if 'nodelist' not in node_result:
572
      bad = True
573
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
574
    else:
575
      if node_result['nodelist']:
576
        bad = True
577
        for node in node_result['nodelist']:
578
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
579
                          (node, node_result['nodelist'][node]))
580
    if 'node-net-test' not in node_result:
581
      bad = True
582
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
583
    else:
584
      if node_result['node-net-test']:
585
        bad = True
586
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
587
        for node in nlist:
588
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
589
                          (node, node_result['node-net-test'][node]))
590

    
591
    hyp_result = node_result.get('hypervisor', None)
592
    if hyp_result is not None:
593
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
594
    return bad
595

    
596
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
597
                      node_instance, feedback_fn):
598
    """Verify an instance.
599

600
    This function checks to see if the required block devices are
601
    available on the instance's node.
602

603
    """
604
    bad = False
605

    
606
    node_current = instanceconfig.primary_node
607

    
608
    node_vol_should = {}
609
    instanceconfig.MapLVsByNode(node_vol_should)
610

    
611
    for node in node_vol_should:
612
      for volume in node_vol_should[node]:
613
        if node not in node_vol_is or volume not in node_vol_is[node]:
614
          feedback_fn("  - ERROR: volume %s missing on node %s" %
615
                          (volume, node))
616
          bad = True
617

    
618
    if not instanceconfig.status == 'down':
619
      if (node_current not in node_instance or
620
          not instance in node_instance[node_current]):
621
        feedback_fn("  - ERROR: instance %s not running on node %s" %
622
                        (instance, node_current))
623
        bad = True
624

    
625
    for node in node_instance:
626
      if (not node == node_current):
627
        if instance in node_instance[node]:
628
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
629
                          (instance, node))
630
          bad = True
631

    
632
    return bad
633

    
634
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
635
    """Verify if there are any unknown volumes in the cluster.
636

637
    The .os, .swap and backup volumes are ignored. All other volumes are
638
    reported as unknown.
639

640
    """
641
    bad = False
642

    
643
    for node in node_vol_is:
644
      for volume in node_vol_is[node]:
645
        if node not in node_vol_should or volume not in node_vol_should[node]:
646
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
647
                      (volume, node))
648
          bad = True
649
    return bad
650

    
651
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
652
    """Verify the list of running instances.
653

654
    This checks what instances are running but unknown to the cluster.
655

656
    """
657
    bad = False
658
    for node in node_instance:
659
      for runninginstance in node_instance[node]:
660
        if runninginstance not in instancelist:
661
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
662
                          (runninginstance, node))
663
          bad = True
664
    return bad
665

    
666
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
667
    """Verify N+1 Memory Resilience.
668

669
    Check that if one single node dies we can still start all the instances it
670
    was primary for.
671

672
    """
673
    bad = False
674

    
675
    for node, nodeinfo in node_info.iteritems():
676
      # This code checks that every node which is now listed as secondary has
677
      # enough memory to host all instances it is supposed to should a single
678
      # other node in the cluster fail.
679
      # FIXME: not ready for failover to an arbitrary node
680
      # FIXME: does not support file-backed instances
681
      # WARNING: we currently take into account down instances as well as up
682
      # ones, considering that even if they're down someone might want to start
683
      # them even in the event of a node failure.
684
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
685
        needed_mem = 0
686
        for instance in instances:
687
          needed_mem += instance_cfg[instance].memory
688
        if nodeinfo['mfree'] < needed_mem:
689
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
690
                      " failovers should node %s fail" % (node, prinode))
691
          bad = True
692
    return bad
693

    
694
  def CheckPrereq(self):
695
    """Check prerequisites.
696

697
    Transform the list of checks we're going to skip into a set and check that
698
    all its members are valid.
699

700
    """
701
    self.skip_set = frozenset(self.op.skip_checks)
702
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
703
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
704

    
705
  def BuildHooksEnv(self):
706
    """Build hooks env.
707

708
    Cluster-Verify hooks just rone in the post phase and their failure makes
709
    the output be logged in the verify output and the verification to fail.
710

711
    """
712
    all_nodes = self.cfg.GetNodeList()
713
    # TODO: populate the environment with useful information for verify hooks
714
    env = {}
715
    return env, [], all_nodes
716

    
717
  def Exec(self, feedback_fn):
718
    """Verify integrity of cluster, performing various test on nodes.
719

720
    """
721
    bad = False
722
    feedback_fn("* Verifying global settings")
723
    for msg in self.cfg.VerifyConfig():
724
      feedback_fn("  - ERROR: %s" % msg)
725

    
726
    vg_name = self.cfg.GetVGName()
727
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
728
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
729
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
730
    i_non_redundant = [] # Non redundant instances
731
    node_volume = {}
732
    node_instance = {}
733
    node_info = {}
734
    instance_cfg = {}
735

    
736
    # FIXME: verify OS list
737
    # do local checksums
738
    file_names = list(self.sstore.GetFileList())
739
    file_names.append(constants.SSL_CERT_FILE)
740
    file_names.append(constants.CLUSTER_CONF_FILE)
741
    local_checksums = utils.FingerprintFiles(file_names)
742

    
743
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
744
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
745
    all_instanceinfo = rpc.call_instance_list(nodelist)
746
    all_vglist = rpc.call_vg_list(nodelist)
747
    node_verify_param = {
748
      'filelist': file_names,
749
      'nodelist': nodelist,
750
      'hypervisor': None,
751
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
752
                        for node in nodeinfo]
753
      }
754
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
755
    all_rversion = rpc.call_version(nodelist)
756
    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())
757

    
758
    for node in nodelist:
759
      feedback_fn("* Verifying node %s" % node)
760
      result = self._VerifyNode(node, file_names, local_checksums,
761
                                all_vglist[node], all_nvinfo[node],
762
                                all_rversion[node], feedback_fn)
763
      bad = bad or result
764

    
765
      # node_volume
766
      volumeinfo = all_volumeinfo[node]
767

    
768
      if isinstance(volumeinfo, basestring):
769
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
770
                    (node, volumeinfo[-400:].encode('string_escape')))
771
        bad = True
772
        node_volume[node] = {}
773
      elif not isinstance(volumeinfo, dict):
774
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
775
        bad = True
776
        continue
777
      else:
778
        node_volume[node] = volumeinfo
779

    
780
      # node_instance
781
      nodeinstance = all_instanceinfo[node]
782
      if type(nodeinstance) != list:
783
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
784
        bad = True
785
        continue
786

    
787
      node_instance[node] = nodeinstance
788

    
789
      # node_info
790
      nodeinfo = all_ninfo[node]
791
      if not isinstance(nodeinfo, dict):
792
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
793
        bad = True
794
        continue
795

    
796
      try:
797
        node_info[node] = {
798
          "mfree": int(nodeinfo['memory_free']),
799
          "dfree": int(nodeinfo['vg_free']),
800
          "pinst": [],
801
          "sinst": [],
802
          # dictionary holding all instances this node is secondary for,
803
          # grouped by their primary node. Each key is a cluster node, and each
804
          # value is a list of instances which have the key as primary and the
805
          # current node as secondary.  this is handy to calculate N+1 memory
806
          # availability if you can only failover from a primary to its
807
          # secondary.
808
          "sinst-by-pnode": {},
809
        }
810
      except ValueError:
811
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
812
        bad = True
813
        continue
814

    
815
    node_vol_should = {}
816

    
817
    for instance in instancelist:
818
      feedback_fn("* Verifying instance %s" % instance)
819
      inst_config = self.cfg.GetInstanceInfo(instance)
820
      result =  self._VerifyInstance(instance, inst_config, node_volume,
821
                                     node_instance, feedback_fn)
822
      bad = bad or result
823

    
824
      inst_config.MapLVsByNode(node_vol_should)
825

    
826
      instance_cfg[instance] = inst_config
827

    
828
      pnode = inst_config.primary_node
829
      if pnode in node_info:
830
        node_info[pnode]['pinst'].append(instance)
831
      else:
832
        feedback_fn("  - ERROR: instance %s, connection to primary node"
833
                    " %s failed" % (instance, pnode))
834
        bad = True
835

    
836
      # If the instance is non-redundant we cannot survive losing its primary
837
      # node, so we are not N+1 compliant. On the other hand we have no disk
838
      # templates with more than one secondary so that situation is not well
839
      # supported either.
840
      # FIXME: does not support file-backed instances
841
      if len(inst_config.secondary_nodes) == 0:
842
        i_non_redundant.append(instance)
843
      elif len(inst_config.secondary_nodes) > 1:
844
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
845
                    % instance)
846

    
847
      for snode in inst_config.secondary_nodes:
848
        if snode in node_info:
849
          node_info[snode]['sinst'].append(instance)
850
          if pnode not in node_info[snode]['sinst-by-pnode']:
851
            node_info[snode]['sinst-by-pnode'][pnode] = []
852
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
853
        else:
854
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
855
                      " %s failed" % (instance, snode))
856

    
857
    feedback_fn("* Verifying orphan volumes")
858
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
859
                                       feedback_fn)
860
    bad = bad or result
861

    
862
    feedback_fn("* Verifying remaining instances")
863
    result = self._VerifyOrphanInstances(instancelist, node_instance,
864
                                         feedback_fn)
865
    bad = bad or result
866

    
867
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
868
      feedback_fn("* Verifying N+1 Memory redundancy")
869
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
870
      bad = bad or result
871

    
872
    feedback_fn("* Other Notes")
873
    if i_non_redundant:
874
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
875
                  % len(i_non_redundant))
876

    
877
    return not bad
878

    
879
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
880
    """Analize the post-hooks' result, handle it, and send some
881
    nicely-formatted feedback back to the user.
882

883
    Args:
884
      phase: the hooks phase that has just been run
885
      hooks_results: the results of the multi-node hooks rpc call
886
      feedback_fn: function to send feedback back to the caller
887
      lu_result: previous Exec result
888

889
    """
890
    # We only really run POST phase hooks, and are only interested in
891
    # their results
892
    if phase == constants.HOOKS_PHASE_POST:
893
      # Used to change hooks' output to proper indentation
894
      indent_re = re.compile('^', re.M)
895
      feedback_fn("* Hooks Results")
896
      if not hooks_results:
897
        feedback_fn("  - ERROR: general communication failure")
898
        lu_result = 1
899
      else:
900
        for node_name in hooks_results:
901
          show_node_header = True
902
          res = hooks_results[node_name]
903
          if res is False or not isinstance(res, list):
904
            feedback_fn("    Communication failure")
905
            lu_result = 1
906
            continue
907
          for script, hkr, output in res:
908
            if hkr == constants.HKR_FAIL:
909
              # The node header is only shown once, if there are
910
              # failing hooks on that node
911
              if show_node_header:
912
                feedback_fn("  Node %s:" % node_name)
913
                show_node_header = False
914
              feedback_fn("    ERROR: Script %s failed, output:" % script)
915
              output = indent_re.sub('      ', output)
916
              feedback_fn("%s" % output)
917
              lu_result = 1
918

    
919
      return lu_result
920

    
921

    
922
class LUVerifyDisks(NoHooksLU):
923
  """Verifies the cluster disks status.
924

925
  """
926
  _OP_REQP = []
927
  REQ_BGL = False
928

    
929
  def ExpandNames(self):
930
    self.needed_locks = {
931
      locking.LEVEL_NODE: locking.ALL_SET,
932
      locking.LEVEL_INSTANCE: locking.ALL_SET,
933
    }
934
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
935

    
936
  def CheckPrereq(self):
937
    """Check prerequisites.
938

939
    This has no prerequisites.
940

941
    """
942
    pass
943

    
944
  def Exec(self, feedback_fn):
945
    """Verify integrity of cluster disks.
946

947
    """
948
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
949

    
950
    vg_name = self.cfg.GetVGName()
951
    nodes = utils.NiceSort(self.cfg.GetNodeList())
952
    instances = [self.cfg.GetInstanceInfo(name)
953
                 for name in self.cfg.GetInstanceList()]
954

    
955
    nv_dict = {}
956
    for inst in instances:
957
      inst_lvs = {}
958
      if (inst.status != "up" or
959
          inst.disk_template not in constants.DTS_NET_MIRROR):
960
        continue
961
      inst.MapLVsByNode(inst_lvs)
962
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
963
      for node, vol_list in inst_lvs.iteritems():
964
        for vol in vol_list:
965
          nv_dict[(node, vol)] = inst
966

    
967
    if not nv_dict:
968
      return result
969

    
970
    node_lvs = rpc.call_volume_list(nodes, vg_name)
971

    
972
    to_act = set()
973
    for node in nodes:
974
      # node_volume
975
      lvs = node_lvs[node]
976

    
977
      if isinstance(lvs, basestring):
978
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
979
        res_nlvm[node] = lvs
980
      elif not isinstance(lvs, dict):
981
        logger.Info("connection to node %s failed or invalid data returned" %
982
                    (node,))
983
        res_nodes.append(node)
984
        continue
985

    
986
      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
987
        inst = nv_dict.pop((node, lv_name), None)
988
        if (not lv_online and inst is not None
989
            and inst.name not in res_instances):
990
          res_instances.append(inst.name)
991

    
992
    # any leftover items in nv_dict are missing LVs, let's arrange the
993
    # data better
994
    for key, inst in nv_dict.iteritems():
995
      if inst.name not in res_missing:
996
        res_missing[inst.name] = []
997
      res_missing[inst.name].append(key)
998

    
999
    return result
1000

    
1001

    
1002
class LURenameCluster(LogicalUnit):
1003
  """Rename the cluster.
1004

1005
  """
1006
  HPATH = "cluster-rename"
1007
  HTYPE = constants.HTYPE_CLUSTER
1008
  _OP_REQP = ["name"]
1009
  REQ_WSSTORE = True
1010

    
1011
  def BuildHooksEnv(self):
1012
    """Build hooks env.
1013

1014
    """
1015
    env = {
1016
      "OP_TARGET": self.sstore.GetClusterName(),
1017
      "NEW_NAME": self.op.name,
1018
      }
1019
    mn = self.sstore.GetMasterNode()
1020
    return env, [mn], [mn]
1021

    
1022
  def CheckPrereq(self):
1023
    """Verify that the passed name is a valid one.
1024

1025
    """
1026
    hostname = utils.HostInfo(self.op.name)
1027

    
1028
    new_name = hostname.name
1029
    self.ip = new_ip = hostname.ip
1030
    old_name = self.sstore.GetClusterName()
1031
    old_ip = self.sstore.GetMasterIP()
1032
    if new_name == old_name and new_ip == old_ip:
1033
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1034
                                 " cluster has changed")
1035
    if new_ip != old_ip:
1036
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1037
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1038
                                   " reachable on the network. Aborting." %
1039
                                   new_ip)
1040

    
1041
    self.op.name = new_name
1042

    
1043
  def Exec(self, feedback_fn):
1044
    """Rename the cluster.
1045

1046
    """
1047
    clustername = self.op.name
1048
    ip = self.ip
1049
    ss = self.sstore
1050

    
1051
    # shutdown the master IP
1052
    master = ss.GetMasterNode()
1053
    if not rpc.call_node_stop_master(master, False):
1054
      raise errors.OpExecError("Could not disable the master role")
1055

    
1056
    try:
1057
      # modify the sstore
1058
      ss.SetKey(ss.SS_MASTER_IP, ip)
1059
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)
1060

    
1061
      # Distribute updated ss config to all nodes
1062
      myself = self.cfg.GetNodeInfo(master)
1063
      dist_nodes = self.cfg.GetNodeList()
1064
      if myself.name in dist_nodes:
1065
        dist_nodes.remove(myself.name)
1066

    
1067
      logger.Debug("Copying updated ssconf data to all nodes")
1068
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
1069
        fname = ss.KeyToFilename(keyname)
1070
        result = rpc.call_upload_file(dist_nodes, fname)
1071
        for to_node in dist_nodes:
1072
          if not result[to_node]:
1073
            logger.Error("copy of file %s to node %s failed" %
1074
                         (fname, to_node))
1075
    finally:
1076
      if not rpc.call_node_start_master(master, False):
1077
        logger.Error("Could not re-enable the master role on the master,"
1078
                     " please restart manually.")
1079

    
1080

    
1081
def _RecursiveCheckIfLVMBased(disk):
1082
  """Check if the given disk or its children are lvm-based.
1083

1084
  Args:
1085
    disk: ganeti.objects.Disk object
1086

1087
  Returns:
1088
    boolean indicating whether a LD_LV dev_type was found or not
1089

1090
  """
1091
  if disk.children:
1092
    for chdisk in disk.children:
1093
      if _RecursiveCheckIfLVMBased(chdisk):
1094
        return True
1095
  return disk.dev_type == constants.LD_LV
1096

    
1097

    
1098
class LUSetClusterParams(LogicalUnit):
1099
  """Change the parameters of the cluster.
1100

1101
  """
1102
  HPATH = "cluster-modify"
1103
  HTYPE = constants.HTYPE_CLUSTER
1104
  _OP_REQP = []
1105
  REQ_BGL = False
1106

    
1107
  def ExpandNames(self):
1108
    # FIXME: in the future maybe other cluster params won't require checking on
1109
    # all nodes to be modified.
1110
    self.needed_locks = {
1111
      locking.LEVEL_NODE: locking.ALL_SET,
1112
    }
1113
    self.share_locks[locking.LEVEL_NODE] = 1
1114

    
1115
  def BuildHooksEnv(self):
1116
    """Build hooks env.
1117

1118
    """
1119
    env = {
1120
      "OP_TARGET": self.sstore.GetClusterName(),
1121
      "NEW_VG_NAME": self.op.vg_name,
1122
      }
1123
    mn = self.sstore.GetMasterNode()
1124
    return env, [mn], [mn]
1125

    
1126
  def CheckPrereq(self):
1127
    """Check prerequisites.
1128

1129
    This checks whether the given params don't conflict and
1130
    if the given volume group is valid.
1131

1132
    """
1133
    # FIXME: This only works because there is only one parameter that can be
1134
    # changed or removed.
1135
    if not self.op.vg_name:
1136
      instances = self.cfg.GetAllInstancesInfo().values()
1137
      for inst in instances:
1138
        for disk in inst.disks:
1139
          if _RecursiveCheckIfLVMBased(disk):
1140
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1141
                                       " lvm-based instances exist")
1142

    
1143
    # if vg_name not None, checks given volume group on all nodes
1144
    if self.op.vg_name:
1145
      node_list = self.acquired_locks[locking.LEVEL_NODE]
1146
      vglist = rpc.call_vg_list(node_list)
1147
      for node in node_list:
1148
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
1149
                                              constants.MIN_VG_SIZE)
1150
        if vgstatus:
1151
          raise errors.OpPrereqError("Error on node '%s': %s" %
1152
                                     (node, vgstatus))
1153

    
1154
  def Exec(self, feedback_fn):
1155
    """Change the parameters of the cluster.
1156

1157
    """
1158
    if self.op.vg_name != self.cfg.GetVGName():
1159
      self.cfg.SetVGName(self.op.vg_name)
1160
    else:
1161
      feedback_fn("Cluster LVM configuration already in desired"
1162
                  " state, not changing")
1163

    
1164

    
1165
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
1166
  """Sleep and poll for an instance's disk to sync.
1167

1168
  """
1169
  if not instance.disks:
1170
    return True
1171

    
1172
  if not oneshot:
1173
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1174

    
1175
  node = instance.primary_node
1176

    
1177
  for dev in instance.disks:
1178
    cfgw.SetDiskID(dev, node)
1179

    
1180
  retries = 0
1181
  while True:
1182
    max_time = 0
1183
    done = True
1184
    cumul_degraded = False
1185
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1186
    if not rstats:
1187
      proc.LogWarning("Can't get any data from node %s" % node)
1188
      retries += 1
1189
      if retries >= 10:
1190
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1191
                                 " aborting." % node)
1192
      time.sleep(6)
1193
      continue
1194
    retries = 0
1195
    for i in range(len(rstats)):
1196
      mstat = rstats[i]
1197
      if mstat is None:
1198
        proc.LogWarning("Can't compute data for node %s/%s" %
1199
                        (node, instance.disks[i].iv_name))
1200
        continue
1201
      # we ignore the ldisk parameter
1202
      perc_done, est_time, is_degraded, _ = mstat
1203
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1204
      if perc_done is not None:
1205
        done = False
1206
        if est_time is not None:
1207
          rem_time = "%d estimated seconds remaining" % est_time
1208
          max_time = est_time
1209
        else:
1210
          rem_time = "no time estimate"
1211
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
1212
                     (instance.disks[i].iv_name, perc_done, rem_time))
1213
    if done or oneshot:
1214
      break
1215

    
1216
    time.sleep(min(60, max_time))
1217

    
1218
  if done:
1219
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1220
  return not cumul_degraded
1221

    
1222

    
1223
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
1224
  """Check that mirrors are not degraded.
1225

1226
  The ldisk parameter, if True, will change the test from the
1227
  is_degraded attribute (which represents overall non-ok status for
1228
  the device(s)) to the ldisk (representing the local storage status).
1229

1230
  """
1231
  cfgw.SetDiskID(dev, node)
1232
  if ldisk:
1233
    idx = 6
1234
  else:
1235
    idx = 5
1236

    
1237
  result = True
1238
  if on_primary or dev.AssembleOnSecondary():
1239
    rstats = rpc.call_blockdev_find(node, dev)
1240
    if not rstats:
1241
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
1242
      result = False
1243
    else:
1244
      result = result and (not rstats[idx])
1245
  if dev.children:
1246
    for child in dev.children:
1247
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
1248

    
1249
  return result
1250

    
1251

    
1252
class LUDiagnoseOS(NoHooksLU):
1253
  """Logical unit for OS diagnose/query.
1254

1255
  """
1256
  _OP_REQP = ["output_fields", "names"]
1257
  REQ_BGL = False
1258

    
1259
  def ExpandNames(self):
1260
    if self.op.names:
1261
      raise errors.OpPrereqError("Selective OS query not supported")
1262

    
1263
    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
1264
    _CheckOutputFields(static=[],
1265
                       dynamic=self.dynamic_fields,
1266
                       selected=self.op.output_fields)
1267

    
1268
    # Lock all nodes, in shared mode
1269
    self.needed_locks = {}
1270
    self.share_locks[locking.LEVEL_NODE] = 1
1271
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1272

    
1273
  def CheckPrereq(self):
1274
    """Check prerequisites.
1275

1276
    """
1277

    
1278
  @staticmethod
1279
  def _DiagnoseByOS(node_list, rlist):
1280
    """Remaps a per-node return list into an a per-os per-node dictionary
1281

1282
      Args:
1283
        node_list: a list with the names of all nodes
1284
        rlist: a map with node names as keys and OS objects as values
1285

1286
      Returns:
1287
        map: a map with osnames as keys and as value another map, with
1288
             nodes as
1289
             keys and list of OS objects as values
1290
             e.g. {"debian-etch": {"node1": [<object>,...],
1291
                                   "node2": [<object>,]}
1292
                  }
1293

1294
    """
1295
    all_os = {}
1296
    for node_name, nr in rlist.iteritems():
1297
      if not nr:
1298
        continue
1299
      for os_obj in nr:
1300
        if os_obj.name not in all_os:
1301
          # build a list of nodes for this os containing empty lists
1302
          # for each node in node_list
1303
          all_os[os_obj.name] = {}
1304
          for nname in node_list:
1305
            all_os[os_obj.name][nname] = []
1306
        all_os[os_obj.name][node_name].append(os_obj)
1307
    return all_os
1308

    
1309
  def Exec(self, feedback_fn):
1310
    """Compute the list of OSes.
1311

1312
    """
1313
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1314
    node_data = rpc.call_os_diagnose(node_list)
1315
    if node_data == False:
1316
      raise errors.OpExecError("Can't gather the list of OSes")
1317
    pol = self._DiagnoseByOS(node_list, node_data)
1318
    output = []
1319
    for os_name, os_data in pol.iteritems():
1320
      row = []
1321
      for field in self.op.output_fields:
1322
        if field == "name":
1323
          val = os_name
1324
        elif field == "valid":
1325
          val = utils.all([osl and osl[0] for osl in os_data.values()])
1326
        elif field == "node_status":
1327
          val = {}
1328
          for node_name, nos_list in os_data.iteritems():
1329
            val[node_name] = [(v.status, v.path) for v in nos_list]
1330
        else:
1331
          raise errors.ParameterError(field)
1332
        row.append(val)
1333
      output.append(row)
1334

    
1335
    return output
1336

    
1337

    
1338
class LURemoveNode(LogicalUnit):
1339
  """Logical unit for removing a node.
1340

1341
  """
1342
  HPATH = "node-remove"
1343
  HTYPE = constants.HTYPE_NODE
1344
  _OP_REQP = ["node_name"]
1345

    
1346
  def BuildHooksEnv(self):
1347
    """Build hooks env.
1348

1349
    This doesn't run on the target node in the pre phase as a failed
1350
    node would then be impossible to remove.
1351

1352
    """
1353
    env = {
1354
      "OP_TARGET": self.op.node_name,
1355
      "NODE_NAME": self.op.node_name,
1356
      }
1357
    all_nodes = self.cfg.GetNodeList()
1358
    all_nodes.remove(self.op.node_name)
1359
    return env, all_nodes, all_nodes
1360

    
1361
  def CheckPrereq(self):
1362
    """Check prerequisites.
1363

1364
    This checks:
1365
     - the node exists in the configuration
1366
     - it does not have primary or secondary instances
1367
     - it's not the master
1368

1369
    Any errors are signalled by raising errors.OpPrereqError.
1370

1371
    """
1372
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1373
    if node is None:
1374
      raise errors.OpPrereqError, ("Node '%s' is unknown." % self.op.node_name)
1375

    
1376
    instance_list = self.cfg.GetInstanceList()
1377

    
1378
    masternode = self.sstore.GetMasterNode()
1379
    if node.name == masternode:
1380
      raise errors.OpPrereqError("Node is the master node,"
1381
                                 " you need to failover first.")
1382

    
1383
    for instance_name in instance_list:
1384
      instance = self.cfg.GetInstanceInfo(instance_name)
1385
      if node.name == instance.primary_node:
1386
        raise errors.OpPrereqError("Instance %s still running on the node,"
1387
                                   " please remove first." % instance_name)
1388
      if node.name in instance.secondary_nodes:
1389
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
1390
                                   " please remove first." % instance_name)
1391
    self.op.node_name = node.name
1392
    self.node = node
1393

    
1394
  def Exec(self, feedback_fn):
1395
    """Removes the node from the cluster.
1396

1397
    """
1398
    node = self.node
1399
    logger.Info("stopping the node daemon and removing configs from node %s" %
1400
                node.name)
1401

    
1402
    self.context.RemoveNode(node.name)
1403

    
1404
    rpc.call_node_leave_cluster(node.name)
1405

    
1406

    
1407
class LUQueryNodes(NoHooksLU):
1408
  """Logical unit for querying nodes.
1409

1410
  """
1411
  _OP_REQP = ["output_fields", "names"]
1412
  REQ_BGL = False
1413

    
1414
  def ExpandNames(self):
1415
    self.dynamic_fields = frozenset([
1416
      "dtotal", "dfree",
1417
      "mtotal", "mnode", "mfree",
1418
      "bootid",
1419
      "ctotal",
1420
      ])
1421

    
1422
    self.static_fields = frozenset([
1423
      "name", "pinst_cnt", "sinst_cnt",
1424
      "pinst_list", "sinst_list",
1425
      "pip", "sip", "tags",
1426
      "serial_no",
1427
      ])
1428

    
1429
    _CheckOutputFields(static=self.static_fields,
1430
                       dynamic=self.dynamic_fields,
1431
                       selected=self.op.output_fields)
1432

    
1433
    self.needed_locks = {}
1434
    self.share_locks[locking.LEVEL_NODE] = 1
1435

    
1436
    if self.op.names:
1437
      self.wanted = _GetWantedNodes(self, self.op.names)
1438
    else:
1439
      self.wanted = locking.ALL_SET
1440

    
1441
    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
1442
    if self.do_locking:
1443
      # if we don't request only static fields, we need to lock the nodes
1444
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
1445

    
1446

    
1447
  def CheckPrereq(self):
1448
    """Check prerequisites.
1449

1450
    """
1451
    # The validation of the node list is done in the _GetWantedNodes,
1452
    # if non empty, and if empty, there's no validation to do
1453
    pass
1454

    
1455
  def Exec(self, feedback_fn):
1456
    """Computes the list of nodes and their attributes.
1457

1458
    """
1459
    all_info = self.cfg.GetAllNodesInfo()
1460
    if self.do_locking:
1461
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
1462
    elif self.wanted != locking.ALL_SET:
1463
      nodenames = self.wanted
1464
      missing = set(nodenames).difference(all_info.keys())
1465
      if missing:
1466
        raise self.OpExecError(
1467
          "Some nodes were removed before retrieving their data: %s" % missing)
1468
    else:
1469
      nodenames = all_info.keys()
1470
    nodelist = [all_info[name] for name in nodenames]
1471

    
1472
    # begin data gathering
1473

    
1474
    if self.dynamic_fields.intersection(self.op.output_fields):
1475
      live_data = {}
1476
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
1477
      for name in nodenames:
1478
        nodeinfo = node_data.get(name, None)
1479
        if nodeinfo:
1480
          live_data[name] = {
1481
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
1482
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
1483
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
1484
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
1485
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
1486
            "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
1487
            "bootid": nodeinfo['bootid'],
1488
            }
1489
        else:
1490
          live_data[name] = {}
1491
    else:
1492
      live_data = dict.fromkeys(nodenames, {})
1493

    
1494
    node_to_primary = dict([(name, set()) for name in nodenames])
1495
    node_to_secondary = dict([(name, set()) for name in nodenames])
1496

    
1497
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
1498
                             "sinst_cnt", "sinst_list"))
1499
    if inst_fields & frozenset(self.op.output_fields):
1500
      instancelist = self.cfg.GetInstanceList()
1501

    
1502
      for instance_name in instancelist:
1503
        inst = self.cfg.GetInstanceInfo(instance_name)
1504
        if inst.primary_node in node_to_primary:
1505
          node_to_primary[inst.primary_node].add(inst.name)
1506
        for secnode in inst.secondary_nodes:
1507
          if secnode in node_to_secondary:
1508
            node_to_secondary[secnode].add(inst.name)
1509

    
1510
    # end data gathering
1511

    
1512
    output = []
1513
    for node in nodelist:
1514
      node_output = []
1515
      for field in self.op.output_fields:
1516
        if field == "name":
1517
          val = node.name
1518
        elif field == "pinst_list":
1519
          val = list(node_to_primary[node.name])
1520
        elif field == "sinst_list":
1521
          val = list(node_to_secondary[node.name])
1522
        elif field == "pinst_cnt":
1523
          val = len(node_to_primary[node.name])
1524
        elif field == "sinst_cnt":
1525
          val = len(node_to_secondary[node.name])
1526
        elif field == "pip":
1527
          val = node.primary_ip
1528
        elif field == "sip":
1529
          val = node.secondary_ip
1530
        elif field == "tags":
1531
          val = list(node.GetTags())
1532
        elif field == "serial_no":
1533
          val = node.serial_no
1534
        elif field in self.dynamic_fields:
1535
          val = live_data[node.name].get(field, None)
1536
        else:
1537
          raise errors.ParameterError(field)
1538
        node_output.append(val)
1539
      output.append(node_output)
1540

    
1541
    return output
1542

    
1543

    
1544
class LUQueryNodeVolumes(NoHooksLU):
1545
  """Logical unit for getting volumes on node(s).
1546

1547
  """
1548
  _OP_REQP = ["nodes", "output_fields"]
1549
  REQ_BGL = False
1550

    
1551
  def ExpandNames(self):
1552
    _CheckOutputFields(static=["node"],
1553
                       dynamic=["phys", "vg", "name", "size", "instance"],
1554
                       selected=self.op.output_fields)
1555

    
1556
    self.needed_locks = {}
1557
    self.share_locks[locking.LEVEL_NODE] = 1
1558
    if not self.op.nodes:
1559
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1560
    else:
1561
      self.needed_locks[locking.LEVEL_NODE] = \
1562
        _GetWantedNodes(self, self.op.nodes)
1563

    
1564
  def CheckPrereq(self):
1565
    """Check prerequisites.
1566

1567
    This checks that the fields required are valid output fields.
1568

1569
    """
1570
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
1571

    
1572
  def Exec(self, feedback_fn):
1573
    """Computes the list of nodes and their attributes.
1574

1575
    """
1576
    nodenames = self.nodes
1577
    volumes = rpc.call_node_volumes(nodenames)
1578

    
1579
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
1580
             in self.cfg.GetInstanceList()]
1581

    
1582
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
1583

    
1584
    output = []
1585
    for node in nodenames:
1586
      if node not in volumes or not volumes[node]:
1587
        continue
1588

    
1589
      node_vols = volumes[node][:]
1590
      node_vols.sort(key=lambda vol: vol['dev'])
1591

    
1592
      for vol in node_vols:
1593
        node_output = []
1594
        for field in self.op.output_fields:
1595
          if field == "node":
1596
            val = node
1597
          elif field == "phys":
1598
            val = vol['dev']
1599
          elif field == "vg":
1600
            val = vol['vg']
1601
          elif field == "name":
1602
            val = vol['name']
1603
          elif field == "size":
1604
            val = int(float(vol['size']))
1605
          elif field == "instance":
1606
            for inst in ilist:
1607
              if node not in lv_by_node[inst]:
1608
                continue
1609
              if vol['name'] in lv_by_node[inst][node]:
1610
                val = inst.name
1611
                break
1612
            else:
1613
              val = '-'
1614
          else:
1615
            raise errors.ParameterError(field)
1616
          node_output.append(str(val))
1617

    
1618
        output.append(node_output)
1619

    
1620
    return output
1621

    
1622

    
1623
class LUAddNode(LogicalUnit):
1624
  """Logical unit for adding node to the cluster.
1625

1626
  """
1627
  HPATH = "node-add"
1628
  HTYPE = constants.HTYPE_NODE
1629
  _OP_REQP = ["node_name"]
1630

    
1631
  def BuildHooksEnv(self):
1632
    """Build hooks env.
1633

1634
    This will run on all nodes before, and on all nodes + the new node after.
1635

1636
    """
1637
    env = {
1638
      "OP_TARGET": self.op.node_name,
1639
      "NODE_NAME": self.op.node_name,
1640
      "NODE_PIP": self.op.primary_ip,
1641
      "NODE_SIP": self.op.secondary_ip,
1642
      }
1643
    nodes_0 = self.cfg.GetNodeList()
1644
    nodes_1 = nodes_0 + [self.op.node_name, ]
1645
    return env, nodes_0, nodes_1
1646

    
1647
  def CheckPrereq(self):
1648
    """Check prerequisites.
1649

1650
    This checks:
1651
     - the new node is not already in the config
1652
     - it is resolvable
1653
     - its parameters (single/dual homed) match the cluster
1654

1655
    Any errors are signalled by raising errors.OpPrereqError.
1656

1657
    """
1658
    node_name = self.op.node_name
1659
    cfg = self.cfg
1660

    
1661
    dns_data = utils.HostInfo(node_name)
1662

    
1663
    node = dns_data.name
1664
    primary_ip = self.op.primary_ip = dns_data.ip
1665
    secondary_ip = getattr(self.op, "secondary_ip", None)
1666
    if secondary_ip is None:
1667
      secondary_ip = primary_ip
1668
    if not utils.IsValidIP(secondary_ip):
1669
      raise errors.OpPrereqError("Invalid secondary IP given")
1670
    self.op.secondary_ip = secondary_ip
1671

    
1672
    node_list = cfg.GetNodeList()
1673
    if not self.op.readd and node in node_list:
1674
      raise errors.OpPrereqError("Node %s is already in the configuration" %
1675
                                 node)
1676
    elif self.op.readd and node not in node_list:
1677
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
1678

    
1679
    for existing_node_name in node_list:
1680
      existing_node = cfg.GetNodeInfo(existing_node_name)
1681

    
1682
      if self.op.readd and node == existing_node_name:
1683
        if (existing_node.primary_ip != primary_ip or
1684
            existing_node.secondary_ip != secondary_ip):
1685
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
1686
                                     " address configuration as before")
1687
        continue
1688

    
1689
      if (existing_node.primary_ip == primary_ip or
1690
          existing_node.secondary_ip == primary_ip or
1691
          existing_node.primary_ip == secondary_ip or
1692
          existing_node.secondary_ip == secondary_ip):
1693
        raise errors.OpPrereqError("New node ip address(es) conflict with"
1694
                                   " existing node %s" % existing_node.name)
1695

    
1696
    # check that the type of the node (single versus dual homed) is the
1697
    # same as for the master
1698
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
1699
    master_singlehomed = myself.secondary_ip == myself.primary_ip
1700
    newbie_singlehomed = secondary_ip == primary_ip
1701
    if master_singlehomed != newbie_singlehomed:
1702
      if master_singlehomed:
1703
        raise errors.OpPrereqError("The master has no private ip but the"
1704
                                   " new node has one")
1705
      else:
1706
        raise errors.OpPrereqError("The master has a private ip but the"
1707
                                   " new node doesn't have one")
1708

    
1709
    # check reachability
1710
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
1711
      raise errors.OpPrereqError("Node not reachable by ping")
1712

    
1713
    if not newbie_singlehomed:
1714
      # check reachability from my secondary ip to newbie's secondary ip
1715
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
1716
                           source=myself.secondary_ip):
1717
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
1718
                                   " based ping to noded port")
1719

    
1720
    self.new_node = objects.Node(name=node,
1721
                                 primary_ip=primary_ip,
1722
                                 secondary_ip=secondary_ip)
1723

    
1724
  def Exec(self, feedback_fn):
1725
    """Adds the new node to the cluster.
1726

1727
    """
1728
    new_node = self.new_node
1729
    node = new_node.name
1730

    
1731
    # check connectivity
1732
    result = rpc.call_version([node])[node]
1733
    if result:
1734
      if constants.PROTOCOL_VERSION == result:
1735
        logger.Info("communication to node %s fine, sw version %s match" %
1736
                    (node, result))
1737
      else:
1738
        raise errors.OpExecError("Version mismatch master version %s,"
1739
                                 " node version %s" %
1740
                                 (constants.PROTOCOL_VERSION, result))
1741
    else:
1742
      raise errors.OpExecError("Cannot get version from the new node")
1743

    
1744
    # setup ssh on node
1745
    logger.Info("copy ssh key to node %s" % node)
1746
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
1747
    keyarray = []
1748
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
1749
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
1750
                priv_key, pub_key]
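    # the order of the files above must match the positional arguments of
    # rpc.call_node_add below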
1751

    
1752
    for i in keyfiles:
1753
      f = open(i, 'r')
1754
      try:
1755
        keyarray.append(f.read())
1756
      finally:
1757
        f.close()
1758

    
1759
    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
1760
                               keyarray[3], keyarray[4], keyarray[5])
1761

    
1762
    if not result:
1763
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")
1764

    
1765
    # Add node to our /etc/hosts, and add key to known_hosts
1766
    utils.AddHostToEtcHosts(new_node.name)
1767

    
1768
    if new_node.secondary_ip != new_node.primary_ip:
1769
      if not rpc.call_node_tcp_ping(new_node.name,
1770
                                    constants.LOCALHOST_IP_ADDRESS,
1771
                                    new_node.secondary_ip,
1772
                                    constants.DEFAULT_NODED_PORT,
1773
                                    10, False):
1774
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
1775
                                 " you gave (%s). Please fix and re-run this"
1776
                                 " command." % new_node.secondary_ip)
1777

    
1778
    node_verify_list = [self.sstore.GetMasterNode()]
1779
    node_verify_param = {
1780
      'nodelist': [node],
1781
      # TODO: do a node-net-test as well?
1782
    }
1783

    
1784
    result = rpc.call_node_verify(node_verify_list, node_verify_param)
1785
    for verifier in node_verify_list:
1786
      if not result[verifier]:
1787
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
1788
                                 " for remote verification" % verifier)
1789
      if result[verifier]['nodelist']:
1790
        for failed in result[verifier]['nodelist']:
1791
          feedback_fn("ssh/hostname verification failed %s -> %s" %
1792
                      (verifier, result[verifier]['nodelist'][failed]))
1793
        raise errors.OpExecError("ssh/hostname verification failed.")
1794

    
1795
    # Distribute updated /etc/hosts and known_hosts to all nodes,
1796
    # including the node just added
1797
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
1798
    dist_nodes = self.cfg.GetNodeList()
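    # on readd the node is already part of the node list, so it only needs
    # to be appended for a fresh add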
1799
    if not self.op.readd:
1800
      dist_nodes.append(node)
1801
    if myself.name in dist_nodes:
1802
      dist_nodes.remove(myself.name)
1803

    
1804
    logger.Debug("Copying hosts and known_hosts to all nodes")
1805
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
1806
      result = rpc.call_upload_file(dist_nodes, fname)
1807
      for to_node in dist_nodes:
1808
        if not result[to_node]:
1809
          logger.Error("copy of file %s to node %s failed" %
1810
                       (fname, to_node))
1811

    
1812
    to_copy = self.sstore.GetFileList()
1813
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
1814
      to_copy.append(constants.VNC_PASSWORD_FILE)
1815
    for fname in to_copy:
1816
      result = rpc.call_upload_file([node], fname)
1817
      if not result[node]:
1818
        logger.Error("could not copy file %s to node %s" % (fname, node))
1819

    
1820
    if self.op.readd:
1821
      self.context.ReaddNode(new_node)
1822
    else:
1823
      self.context.AddNode(new_node)
1824

    
1825

    
1826
class LUQueryClusterInfo(NoHooksLU):
1827
  """Query cluster configuration.
1828

1829
  """
1830
  _OP_REQP = []
1831
  REQ_MASTER = False
1832
  REQ_BGL = False
1833

    
1834
  def ExpandNames(self):
1835
    self.needed_locks = {}
1836

    
1837
  def CheckPrereq(self):
1838
    """No prerequsites needed for this LU.
1839

1840
    """
1841
    pass
1842

    
1843
  def Exec(self, feedback_fn):
1844
    """Return cluster config.
1845

1846
    """
1847
    result = {
1848
      "name": self.sstore.GetClusterName(),
1849
      "software_version": constants.RELEASE_VERSION,
1850
      "protocol_version": constants.PROTOCOL_VERSION,
1851
      "config_version": constants.CONFIG_VERSION,
1852
      "os_api_version": constants.OS_API_VERSION,
1853
      "export_version": constants.EXPORT_VERSION,
1854
      "master": self.sstore.GetMasterNode(),
1855
      "architecture": (platform.architecture()[0], platform.machine()),
1856
      "hypervisor_type": self.sstore.GetHypervisorType(),
1857
      }
1858

    
1859
    return result
1860

    
1861

    
1862
class LUQueryConfigValues(NoHooksLU):
1863
  """Return configuration values.
1864

1865
  """
1866
  _OP_REQP = []
1867
  REQ_BGL = False
1868

    
1869
  def ExpandNames(self):
1870
    self.needed_locks = {}
1871

    
1872
    static_fields = ["cluster_name", "master_node"]
1873
    _CheckOutputFields(static=static_fields,
1874
                       dynamic=[],
1875
                       selected=self.op.output_fields)
1876

    
1877
  def CheckPrereq(self):
1878
    """No prerequisites.
1879

1880
    """
1881
    pass
1882

    
1883
  def Exec(self, feedback_fn):
1884
    """Dump a representation of the cluster config to the standard output.
1885

1886
    """
1887
    values = []
1888
    for field in self.op.output_fields:
1889
      if field == "cluster_name":
1890
        values.append(self.cfg.GetClusterName())
1891
      elif field == "master_node":
1892
        values.append(self.cfg.GetMasterNode())
1893
      else:
1894
        raise errors.ParameterError(field)
1895
    return values
1896

    
1897

    
1898
class LUActivateInstanceDisks(NoHooksLU):
1899
  """Bring up an instance's disks.
1900

1901
  """
1902
  _OP_REQP = ["instance_name"]
1903
  REQ_BGL = False
1904

    
1905
  def ExpandNames(self):
1906
    self._ExpandAndLockInstance()
1907
    self.needed_locks[locking.LEVEL_NODE] = []
1908
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1909

    
1910
  def DeclareLocks(self, level):
1911
    if level == locking.LEVEL_NODE:
1912
      self._LockInstancesNodes()
1913

    
1914
  def CheckPrereq(self):
1915
    """Check prerequisites.
1916

1917
    This checks that the instance is in the cluster.
1918

1919
    """
1920
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1921
    assert self.instance is not None, \
1922
      "Cannot retrieve locked instance %s" % self.op.instance_name
1923

    
1924
  def Exec(self, feedback_fn):
1925
    """Activate the disks.
1926

1927
    """
1928
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
1929
    if not disks_ok:
1930
      raise errors.OpExecError("Cannot activate block devices")
1931

    
1932
    return disks_info
1933

    
1934

    
1935
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
1936
  """Prepare the block devices for an instance.
1937

1938
  This sets up the block devices on all nodes.
1939

1940
  Args:
1941
    instance: a ganeti.objects.Instance object
1942
    ignore_secondaries: if true, errors on secondary nodes won't result
1943
                        in an error return from the function
1944

1945
  Returns:
1946
    false if the operation failed
1947
    list of (host, instance_visible_name, node_visible_name) if the operation
         succeeded, with the mapping from node devices to instance devices
1949
  """
1950
  device_info = []
1951
  disks_ok = True
1952
  iname = instance.name
1953
  # With the two passes mechanism we try to reduce the window of
1954
  # opportunity for the race condition of switching DRBD to primary
1955
  # before handshaking occurred, but we do not eliminate it
1956

    
1957
  # The proper fix would be to wait (with some limits) until the
1958
  # connection has been made and drbd transitions from WFConnection
1959
  # into any other network-connected state (Connected, SyncTarget,
1960
  # SyncSource, etc.)
1961

    
1962
  # 1st pass, assemble on all nodes in secondary mode
1963
  for inst_disk in instance.disks:
1964
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
1965
      cfg.SetDiskID(node_disk, node)
1966
      result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
1967
      if not result:
1968
        logger.Error("could not prepare block device %s on node %s"
1969
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
1970
        if not ignore_secondaries:
1971
          disks_ok = False
1972

    
1973
  # FIXME: race condition on drbd migration to primary
1974

    
1975
  # 2nd pass, do only the primary node
1976
  for inst_disk in instance.disks:
1977
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
1978
      if node != instance.primary_node:
1979
        continue
1980
      cfg.SetDiskID(node_disk, node)
1981
      result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
1982
      if not result:
1983
        logger.Error("could not prepare block device %s on node %s"
1984
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
1985
        disks_ok = False
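    # the primary assemble result completes the (node, iv_name, device)
    # mapping returned to the caller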
1986
    device_info.append((instance.primary_node, inst_disk.iv_name, result))
1987

    
1988
  # leave the disks configured for the primary node
1989
  # this is a workaround that would be fixed better by
1990
  # improving the logical/physical id handling
1991
  for disk in instance.disks:
1992
    cfg.SetDiskID(disk, instance.primary_node)
1993

    
1994
  return disks_ok, device_info
1995

    
1996

    
1997
def _StartInstanceDisks(cfg, instance, force):
1998
  """Start the disks of an instance.
1999

2000
  """
2001
  disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
2002
                                           ignore_secondaries=force)
2003
  if not disks_ok:
2004
    _ShutdownInstanceDisks(instance, cfg)
2005
    if force is not None and not force:
2006
      logger.Error("If the message above refers to a secondary node,"
2007
                   " you can retry the operation using '--force'.")
2008
    raise errors.OpExecError("Disk consistency error")
2009

    
2010

    
2011
class LUDeactivateInstanceDisks(NoHooksLU):
2012
  """Shutdown an instance's disks.
2013

2014
  """
2015
  _OP_REQP = ["instance_name"]
2016
  REQ_BGL = False
2017

    
2018
  def ExpandNames(self):
2019
    self._ExpandAndLockInstance()
2020
    self.needed_locks[locking.LEVEL_NODE] = []
2021
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2022

    
2023
  def DeclareLocks(self, level):
2024
    if level == locking.LEVEL_NODE:
2025
      self._LockInstancesNodes()
2026

    
2027
  def CheckPrereq(self):
2028
    """Check prerequisites.
2029

2030
    This checks that the instance is in the cluster.
2031

2032
    """
2033
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2034
    assert self.instance is not None, \
2035
      "Cannot retrieve locked instance %s" % self.op.instance_name
2036

    
2037
  def Exec(self, feedback_fn):
2038
    """Deactivate the disks
2039

2040
    """
2041
    instance = self.instance
2042
    _SafeShutdownInstanceDisks(instance, self.cfg)
2043

    
2044

    
2045
def _SafeShutdownInstanceDisks(instance, cfg):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  ins_l = rpc.call_instance_list([instance.primary_node])
  ins_l = ins_l[instance.primary_node]
  if not isinstance(ins_l, list):
    raise errors.OpExecError("Can't contact node '%s'" %
                             instance.primary_node)

  if instance.name in ins_l:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(instance, cfg)
2063

    
2064

    
2065
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
2066
  """Shutdown block devices of an instance.
2067

2068
  This does the shutdown on all nodes of the instance.
2069

2070
  Errors on the primary node are ignored only if ignore_primary is
  true.
2072

2073
  """
2074
  result = True
2075
  for disk in instance.disks:
2076
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2077
      cfg.SetDiskID(top_disk, node)
2078
      if not rpc.call_blockdev_shutdown(node, top_disk):
2079
        logger.Error("could not shutdown block device %s on node %s" %
2080
                     (disk.iv_name, node))
2081
        if not ignore_primary or node != instance.primary_node:
2082
          result = False
2083
  return result
2084

    
2085

    
2086
def _CheckNodeFreeMemory(cfg, node, reason, requested):
2087
  """Checks if a node has enough free memory.
2088

2089
  This function checks if a given node has the needed amount of free
  memory. If the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.
2093

2094
  Args:
2095
    - cfg: a ConfigWriter instance
2096
    - node: the node name
2097
    - reason: string to use in the error message
2098
    - requested: the amount of memory in MiB
2099

2100
  """
2101
  nodeinfo = rpc.call_node_info([node], cfg.GetVGName())
2102
  if not nodeinfo or not isinstance(nodeinfo, dict):
2103
    raise errors.OpPrereqError("Could not contact node %s for resource"
2104
                             " information" % (node,))
2105

    
2106
  free_mem = nodeinfo[node].get('memory_free')
2107
  if not isinstance(free_mem, int):
2108
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2109
                             " was '%s'" % (node, free_mem))
2110
  if requested > free_mem:
2111
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2112
                             " needed %s MiB, available %s MiB" %
2113
                             (node, reason, requested, free_mem))
2114

    
2115

    
2116
class LUStartupInstance(LogicalUnit):
2117
  """Starts an instance.
2118

2119
  """
2120
  HPATH = "instance-start"
2121
  HTYPE = constants.HTYPE_INSTANCE
2122
  _OP_REQP = ["instance_name", "force"]
2123
  REQ_BGL = False
2124

    
2125
  def ExpandNames(self):
2126
    self._ExpandAndLockInstance()
2127
    self.needed_locks[locking.LEVEL_NODE] = []
2128
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2129

    
2130
  def DeclareLocks(self, level):
2131
    if level == locking.LEVEL_NODE:
2132
      self._LockInstancesNodes()
2133

    
2134
  def BuildHooksEnv(self):
2135
    """Build hooks env.
2136

2137
    This runs on master, primary and secondary nodes of the instance.
2138

2139
    """
2140
    env = {
2141
      "FORCE": self.op.force,
2142
      }
2143
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2144
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2145
          list(self.instance.secondary_nodes))
2146
    return env, nl, nl
2147

    
2148
  def CheckPrereq(self):
2149
    """Check prerequisites.
2150

2151
    This checks that the instance is in the cluster.
2152

2153
    """
2154
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2155
    assert self.instance is not None, \
2156
      "Cannot retrieve locked instance %s" % self.op.instance_name
2157

    
2158
    # check bridge existence
2159
    _CheckInstanceBridgesExist(instance)
2160

    
2161
    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
2162
                         "starting instance %s" % instance.name,
2163
                         instance.memory)
2164

    
2165
  def Exec(self, feedback_fn):
2166
    """Start the instance.
2167

2168
    """
2169
    instance = self.instance
2170
    force = self.op.force
2171
    extra_args = getattr(self.op, "extra_args", "")
2172

    
2173
    self.cfg.MarkInstanceUp(instance.name)
2174

    
2175
    node_current = instance.primary_node
2176

    
2177
    _StartInstanceDisks(self.cfg, instance, force)
2178

    
2179
    if not rpc.call_instance_start(node_current, instance, extra_args):
2180
      _ShutdownInstanceDisks(instance, self.cfg)
2181
      raise errors.OpExecError("Could not start instance")
2182

    
2183

    
2184
class LURebootInstance(LogicalUnit):
2185
  """Reboot an instance.
2186

2187
  """
2188
  HPATH = "instance-reboot"
2189
  HTYPE = constants.HTYPE_INSTANCE
2190
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2191
  REQ_BGL = False
2192

    
2193
  def ExpandNames(self):
2194
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2195
                                   constants.INSTANCE_REBOOT_HARD,
2196
                                   constants.INSTANCE_REBOOT_FULL]:
2197
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2198
                                  (constants.INSTANCE_REBOOT_SOFT,
2199
                                   constants.INSTANCE_REBOOT_HARD,
2200
                                   constants.INSTANCE_REBOOT_FULL))
2201
    self._ExpandAndLockInstance()
2202
    self.needed_locks[locking.LEVEL_NODE] = []
2203
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2204

    
2205
  def DeclareLocks(self, level):
2206
    if level == locking.LEVEL_NODE:
2207
      # only a full reboot touches the disks on the secondary nodes,
      # so lock just the primary for the other reboot types
      primary_only = self.op.reboot_type != constants.INSTANCE_REBOOT_FULL
2208
      self._LockInstancesNodes(primary_only=primary_only)
2209

    
2210
  def BuildHooksEnv(self):
2211
    """Build hooks env.
2212

2213
    This runs on master, primary and secondary nodes of the instance.
2214

2215
    """
2216
    env = {
2217
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2218
      }
2219
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2220
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2221
          list(self.instance.secondary_nodes))
2222
    return env, nl, nl
2223

    
2224
  def CheckPrereq(self):
2225
    """Check prerequisites.
2226

2227
    This checks that the instance is in the cluster.
2228

2229
    """
2230
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2231
    assert self.instance is not None, \
2232
      "Cannot retrieve locked instance %s" % self.op.instance_name
2233

    
2234
    # check bridge existence
2235
    _CheckInstanceBridgesExist(instance)
2236

    
2237
  def Exec(self, feedback_fn):
2238
    """Reboot the instance.
2239

2240
    """
2241
    instance = self.instance
2242
    ignore_secondaries = self.op.ignore_secondaries
2243
    reboot_type = self.op.reboot_type
2244
    extra_args = getattr(self.op, "extra_args", "")
2245

    
2246
    node_current = instance.primary_node
2247

    
2248
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2249
                       constants.INSTANCE_REBOOT_HARD]:
2250
      if not rpc.call_instance_reboot(node_current, instance,
2251
                                      reboot_type, extra_args):
2252
        raise errors.OpExecError("Could not reboot instance")
2253
    else:
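      # a full reboot is emulated: shut the instance down, cycle its disks
      # and start it again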
2254
      if not rpc.call_instance_shutdown(node_current, instance):
2255
        raise errors.OpExecError("could not shutdown instance for full reboot")
2256
      _ShutdownInstanceDisks(instance, self.cfg)
2257
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
2258
      if not rpc.call_instance_start(node_current, instance, extra_args):
2259
        _ShutdownInstanceDisks(instance, self.cfg)
2260
        raise errors.OpExecError("Could not start instance for full reboot")
2261

    
2262
    self.cfg.MarkInstanceUp(instance.name)
2263

    
2264

    
2265
class LUShutdownInstance(LogicalUnit):
2266
  """Shutdown an instance.
2267

2268
  """
2269
  HPATH = "instance-stop"
2270
  HTYPE = constants.HTYPE_INSTANCE
2271
  _OP_REQP = ["instance_name"]
2272
  REQ_BGL = False
2273

    
2274
  def ExpandNames(self):
2275
    self._ExpandAndLockInstance()
2276
    self.needed_locks[locking.LEVEL_NODE] = []
2277
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2278

    
2279
  def DeclareLocks(self, level):
2280
    if level == locking.LEVEL_NODE:
2281
      self._LockInstancesNodes()
2282

    
2283
  def BuildHooksEnv(self):
2284
    """Build hooks env.
2285

2286
    This runs on master, primary and secondary nodes of the instance.
2287

2288
    """
2289
    env = _BuildInstanceHookEnvByObject(self.instance)
2290
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2291
          list(self.instance.secondary_nodes))
2292
    return env, nl, nl
2293

    
2294
  def CheckPrereq(self):
2295
    """Check prerequisites.
2296

2297
    This checks that the instance is in the cluster.
2298

2299
    """
2300
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2301
    assert self.instance is not None, \
2302
      "Cannot retrieve locked instance %s" % self.op.instance_name
2303

    
2304
  def Exec(self, feedback_fn):
2305
    """Shutdown the instance.
2306

2307
    """
2308
    instance = self.instance
2309
    node_current = instance.primary_node
2310
    self.cfg.MarkInstanceDown(instance.name)
2311
    if not rpc.call_instance_shutdown(node_current, instance):
2312
      logger.Error("could not shutdown instance")
2313

    
2314
    _ShutdownInstanceDisks(instance, self.cfg)
2315

    
2316

    
2317
class LUReinstallInstance(LogicalUnit):
2318
  """Reinstall an instance.
2319

2320
  """
2321
  HPATH = "instance-reinstall"
2322
  HTYPE = constants.HTYPE_INSTANCE
2323
  _OP_REQP = ["instance_name"]
2324
  REQ_BGL = False
2325

    
2326
  def ExpandNames(self):
2327
    self._ExpandAndLockInstance()
2328
    self.needed_locks[locking.LEVEL_NODE] = []
2329
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2330

    
2331
  def DeclareLocks(self, level):
2332
    if level == locking.LEVEL_NODE:
2333
      self._LockInstancesNodes()
2334

    
2335
  def BuildHooksEnv(self):
2336
    """Build hooks env.
2337

2338
    This runs on master, primary and secondary nodes of the instance.
2339

2340
    """
2341
    env = _BuildInstanceHookEnvByObject(self.instance)
2342
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2343
          list(self.instance.secondary_nodes))
2344
    return env, nl, nl
2345

    
2346
  def CheckPrereq(self):
2347
    """Check prerequisites.
2348

2349
    This checks that the instance is in the cluster and is not running.
2350

2351
    """
2352
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2353
    assert instance is not None, \
2354
      "Cannot retrieve locked instance %s" % self.op.instance_name
2355

    
2356
    if instance.disk_template == constants.DT_DISKLESS:
2357
      raise errors.OpPrereqError("Instance '%s' has no disks" %
2358
                                 self.op.instance_name)
2359
    if instance.status != "down":
2360
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2361
                                 self.op.instance_name)
2362
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2363
    if remote_info:
2364
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2365
                                 (self.op.instance_name,
2366
                                  instance.primary_node))
2367

    
2368
    self.op.os_type = getattr(self.op, "os_type", None)
2369
    if self.op.os_type is not None:
2370
      # OS verification
2371
      pnode = self.cfg.GetNodeInfo(
2372
        self.cfg.ExpandNodeName(instance.primary_node))
2373
      if pnode is None:
2374
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
2375
                                   self.op.pnode)
2376
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
2377
      if not os_obj:
2378
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2379
                                   " primary node"  % self.op.os_type)
2380

    
2381
    self.instance = instance
2382

    
2383
  def Exec(self, feedback_fn):
2384
    """Reinstall the instance.
2385

2386
    """
2387
    inst = self.instance
2388

    
2389
    if self.op.os_type is not None:
2390
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2391
      inst.os = self.op.os_type
2392
      self.cfg.Update(inst)
2393

    
2394
    _StartInstanceDisks(self.cfg, inst, None)
2395
    try:
2396
      feedback_fn("Running the instance OS create scripts...")
2397
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
2398
        raise errors.OpExecError("Could not install OS for instance %s"
2399
                                 " on node %s" %
2400
                                 (inst.name, inst.primary_node))
2401
    finally:
2402
      _ShutdownInstanceDisks(inst, self.cfg)
2403

    
2404

    
2405
class LURenameInstance(LogicalUnit):
2406
  """Rename an instance.
2407

2408
  """
2409
  HPATH = "instance-rename"
2410
  HTYPE = constants.HTYPE_INSTANCE
2411
  _OP_REQP = ["instance_name", "new_name"]
2412

    
2413
  def BuildHooksEnv(self):
2414
    """Build hooks env.
2415

2416
    This runs on master, primary and secondary nodes of the instance.
2417

2418
    """
2419
    env = _BuildInstanceHookEnvByObject(self.instance)
2420
    env["INSTANCE_NEW_NAME"] = self.op.new_name
2421
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2422
          list(self.instance.secondary_nodes))
2423
    return env, nl, nl
2424

    
2425
  def CheckPrereq(self):
2426
    """Check prerequisites.
2427

2428
    This checks that the instance is in the cluster and is not running.
2429

2430
    """
2431
    instance = self.cfg.GetInstanceInfo(
2432
      self.cfg.ExpandInstanceName(self.op.instance_name))
2433
    if instance is None:
2434
      raise errors.OpPrereqError("Instance '%s' not known" %
2435
                                 self.op.instance_name)
2436
    if instance.status != "down":
2437
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2438
                                 self.op.instance_name)
2439
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2440
    if remote_info:
2441
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2442
                                 (self.op.instance_name,
2443
                                  instance.primary_node))
2444
    self.instance = instance
2445

    
2446
    # new name verification
2447
    name_info = utils.HostInfo(self.op.new_name)
2448

    
2449
    self.op.new_name = new_name = name_info.name
2450
    instance_list = self.cfg.GetInstanceList()
2451
    if new_name in instance_list:
2452
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2453
                                 new_name)
2454

    
2455
    if not getattr(self.op, "ignore_ip", False):
2456
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
2457
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
2458
                                   (name_info.ip, new_name))
2459

    
2460

    
2461
  def Exec(self, feedback_fn):
2462
    """Reinstall the instance.
2463

2464
    """
2465
    inst = self.instance
2466
    old_name = inst.name
2467

    
2468
    if inst.disk_template == constants.DT_FILE:
2469
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2470

    
2471
    self.cfg.RenameInstance(inst.name, self.op.new_name)
2472
    # Change the instance lock. This is definitely safe while we hold the BGL
2473
    self.context.glm.remove(locking.LEVEL_INSTANCE, inst.name)
2474
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
2475

    
2476
    # re-read the instance from the configuration after rename
2477
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
2478

    
2479
    if inst.disk_template == constants.DT_FILE:
2480
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
2481
      result = rpc.call_file_storage_dir_rename(inst.primary_node,
2482
                                                old_file_storage_dir,
2483
                                                new_file_storage_dir)
2484

    
2485
      if not result:
2486
        raise errors.OpExecError("Could not connect to node '%s' to rename"
2487
                                 " directory '%s' to '%s' (but the instance"
2488
                                 " has been renamed in Ganeti)" % (
2489
                                 inst.primary_node, old_file_storage_dir,
2490
                                 new_file_storage_dir))
2491

    
2492
      if not result[0]:
2493
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
2494
                                 " (but the instance has been renamed in"
2495
                                 " Ganeti)" % (old_file_storage_dir,
2496
                                               new_file_storage_dir))
2497

    
2498
    _StartInstanceDisks(self.cfg, inst, None)
2499
    try:
2500
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
2501
                                          "sda", "sdb"):
2502
        msg = ("Could not run OS rename script for instance %s on node %s"
2503
               " (but the instance has been renamed in Ganeti)" %
2504
               (inst.name, inst.primary_node))
2505
        logger.Error(msg)
2506
    finally:
2507
      _ShutdownInstanceDisks(inst, self.cfg)
2508

    
2509

    
2510
class LURemoveInstance(LogicalUnit):
2511
  """Remove an instance.
2512

2513
  """
2514
  HPATH = "instance-remove"
2515
  HTYPE = constants.HTYPE_INSTANCE
2516
  _OP_REQP = ["instance_name", "ignore_failures"]
2517
  REQ_BGL = False
2518

    
2519
  def ExpandNames(self):
2520
    self._ExpandAndLockInstance()
2521
    self.needed_locks[locking.LEVEL_NODE] = []
2522
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2523

    
2524
  def DeclareLocks(self, level):
2525
    if level == locking.LEVEL_NODE:
2526
      self._LockInstancesNodes()
2527

    
2528
  def BuildHooksEnv(self):
2529
    """Build hooks env.
2530

2531
    This runs on master, primary and secondary nodes of the instance.
2532

2533
    """
2534
    env = _BuildInstanceHookEnvByObject(self.instance)
2535
    nl = [self.sstore.GetMasterNode()]
2536
    return env, nl, nl
2537

    
2538
  def CheckPrereq(self):
2539
    """Check prerequisites.
2540

2541
    This checks that the instance is in the cluster.
2542

2543
    """
2544
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2545
    assert self.instance is not None, \
2546
      "Cannot retrieve locked instance %s" % self.op.instance_name
2547

    
2548
  def Exec(self, feedback_fn):
2549
    """Remove the instance.
2550

2551
    """
2552
    instance = self.instance
2553
    logger.Info("shutting down instance %s on node %s" %
2554
                (instance.name, instance.primary_node))
2555

    
2556
    if not rpc.call_instance_shutdown(instance.primary_node, instance):
2557
      if self.op.ignore_failures:
2558
        feedback_fn("Warning: can't shutdown instance")
2559
      else:
2560
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2561
                                 (instance.name, instance.primary_node))
2562

    
2563
    logger.Info("removing block devices for instance %s" % instance.name)
2564

    
2565
    if not _RemoveDisks(instance, self.cfg):
2566
      if self.op.ignore_failures:
2567
        feedback_fn("Warning: can't remove instance's disks")
2568
      else:
2569
        raise errors.OpExecError("Can't remove instance's disks")
2570

    
2571
    logger.Info("removing instance %s out of cluster config" % instance.name)
2572

    
2573
    self.cfg.RemoveInstance(instance.name)
2574
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
2575

    
2576

    
2577
class LUQueryInstances(NoHooksLU):
2578
  """Logical unit for querying instances.
2579

2580
  """
2581
  _OP_REQP = ["output_fields", "names"]
2582
  REQ_BGL = False
2583

    
2584
  def ExpandNames(self):
2585
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
2586
    self.static_fields = frozenset([
2587
      "name", "os", "pnode", "snodes",
2588
      "admin_state", "admin_ram",
2589
      "disk_template", "ip", "mac", "bridge",
2590
      "sda_size", "sdb_size", "vcpus", "tags",
2591
      "network_port", "kernel_path", "initrd_path",
2592
      "hvm_boot_order", "hvm_acpi", "hvm_pae",
2593
      "hvm_cdrom_image_path", "hvm_nic_type",
2594
      "hvm_disk_type", "vnc_bind_address",
2595
      "serial_no",
2596
      ])
2597
    _CheckOutputFields(static=self.static_fields,
2598
                       dynamic=self.dynamic_fields,
2599
                       selected=self.op.output_fields)
2600

    
2601
    self.needed_locks = {}
2602
    self.share_locks[locking.LEVEL_INSTANCE] = 1
2603
    self.share_locks[locking.LEVEL_NODE] = 1
2604

    
2605
    if self.op.names:
2606
      self.wanted = _GetWantedInstances(self, self.op.names)
2607
    else:
2608
      self.wanted = locking.ALL_SET
2609

    
2610
    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
2611
    if self.do_locking:
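      # if we don't request only static fields, we need to lock the
      # instances and their nodes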
2612
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
2613
      self.needed_locks[locking.LEVEL_NODE] = []
2614
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2615

    
2616
  def DeclareLocks(self, level):
2617
    if level == locking.LEVEL_NODE and self.do_locking:
2618
      self._LockInstancesNodes()
2619

    
2620
  def CheckPrereq(self):
2621
    """Check prerequisites.
2622

2623
    """
2624
    pass
2625

    
2626
  def Exec(self, feedback_fn):
2627
    """Computes the list of nodes and their attributes.
2628

2629
    """
2630
    all_info = self.cfg.GetAllInstancesInfo()
2631
    if self.do_locking:
2632
      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2633
    elif self.wanted != locking.ALL_SET:
2634
      instance_names = self.wanted
2635
      missing = set(instance_names).difference(all_info.keys())
2636
      if missing:
2637
        raise errors.OpExecError(
          "Some instances were removed before retrieving their data: %s"
          % missing)
2640
    else:
2641
      instance_names = all_info.keys()
2642
    instance_list = [all_info[iname] for iname in instance_names]
2643

    
2644
    # begin data gathering
2645

    
2646
    nodes = frozenset([inst.primary_node for inst in instance_list])
2647

    
2648
    bad_nodes = []
2649
    if self.dynamic_fields.intersection(self.op.output_fields):
2650
      live_data = {}
2651
      node_data = rpc.call_all_instances_info(nodes)
2652
      for name in nodes:
2653
        result = node_data[name]
2654
        if result:
2655
          live_data.update(result)
2656
        elif result == False:
2657
          bad_nodes.append(name)
2658
        # else no instance is alive
2659
    else:
2660
      live_data = dict([(name, {}) for name in instance_names])
2661

    
2662
    # end data gathering
2663

    
2664
    output = []
2665
    for instance in instance_list:
2666
      iout = []
2667
      for field in self.op.output_fields:
2668
        if field == "name":
2669
          val = instance.name
2670
        elif field == "os":
2671
          val = instance.os
2672
        elif field == "pnode":
2673
          val = instance.primary_node
2674
        elif field == "snodes":
2675
          val = list(instance.secondary_nodes)
2676
        elif field == "admin_state":
2677
          val = (instance.status != "down")
2678
        elif field == "oper_state":
2679
          if instance.primary_node in bad_nodes:
2680
            val = None
2681
          else:
2682
            val = bool(live_data.get(instance.name))
2683
        elif field == "status":
2684
          if instance.primary_node in bad_nodes:
2685
            val = "ERROR_nodedown"
2686
          else:
2687
            running = bool(live_data.get(instance.name))
2688
            if running:
2689
              if instance.status != "down":
2690
                val = "running"
2691
              else:
2692
                val = "ERROR_up"
2693
            else:
2694
              if instance.status != "down":
2695
                val = "ERROR_down"
2696
              else:
2697
                val = "ADMIN_down"
2698
        elif field == "admin_ram":
2699
          val = instance.memory
2700
        elif field == "oper_ram":
2701
          if instance.primary_node in bad_nodes:
2702
            val = None
2703
          elif instance.name in live_data:
2704
            val = live_data[instance.name].get("memory", "?")
2705
          else:
2706
            val = "-"
2707
        elif field == "disk_template":
2708
          val = instance.disk_template
2709
        elif field == "ip":
2710
          val = instance.nics[0].ip
2711
        elif field == "bridge":
2712
          val = instance.nics[0].bridge
2713
        elif field == "mac":
2714
          val = instance.nics[0].mac
2715
        elif field == "sda_size" or field == "sdb_size":
2716
          disk = instance.FindDisk(field[:3])
2717
          if disk is None:
2718
            val = None
2719
          else:
2720
            val = disk.size
2721
        elif field == "vcpus":
2722
          val = instance.vcpus
2723
        elif field == "tags":
2724
          val = list(instance.GetTags())
2725
        elif field == "serial_no":
2726
          val = instance.serial_no
2727
        elif field in ("network_port", "kernel_path", "initrd_path",
2728
                       "hvm_boot_order", "hvm_acpi", "hvm_pae",
2729
                       "hvm_cdrom_image_path", "hvm_nic_type",
2730
                       "hvm_disk_type", "vnc_bind_address"):
2731
          val = getattr(instance, field, None)
2732
          if val is not None:
2733
            pass
2734
          elif field in ("hvm_nic_type", "hvm_disk_type",
2735
                         "kernel_path", "initrd_path"):
2736
            val = "default"
2737
          else:
2738
            val = "-"
2739
        else:
2740
          raise errors.ParameterError(field)
2741
        iout.append(val)
2742
      output.append(iout)
2743

    
2744
    return output
2745

    
2746

    
2747
class LUFailoverInstance(LogicalUnit):
2748
  """Failover an instance.
2749

2750
  """
2751
  HPATH = "instance-failover"
2752
  HTYPE = constants.HTYPE_INSTANCE
2753
  _OP_REQP = ["instance_name", "ignore_consistency"]
2754
  REQ_BGL = False
2755

    
2756
  def ExpandNames(self):
2757
    self._ExpandAndLockInstance()
2758
    self.needed_locks[locking.LEVEL_NODE] = []
2759
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2760

    
2761
  def DeclareLocks(self, level):
2762
    if level == locking.LEVEL_NODE:
2763
      self._LockInstancesNodes()
2764

    
2765
  def BuildHooksEnv(self):
2766
    """Build hooks env.
2767

2768
    This runs on master, primary and secondary nodes of the instance.
2769

2770
    """
2771
    env = {
2772
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
2773
      }
2774
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2775
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
2776
    return env, nl, nl
2777

    
2778
  def CheckPrereq(self):
2779
    """Check prerequisites.
2780

2781
    This checks that the instance is in the cluster.
2782

2783
    """
2784
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2785
    assert self.instance is not None, \
2786
      "Cannot retrieve locked instance %s" % self.op.instance_name
2787

    
2788
    if instance.disk_template not in constants.DTS_NET_MIRROR:
2789
      raise errors.OpPrereqError("Instance's disk layout is not"
2790
                                 " network mirrored, cannot failover.")
2791

    
2792
    secondary_nodes = instance.secondary_nodes
2793
    if not secondary_nodes:
2794
      raise errors.ProgrammerError("no secondary node but using "
2795
                                   "a mirrored disk template")
2796

    
2797
    target_node = secondary_nodes[0]
2798
    # check memory requirements on the secondary node
2799
    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
2800
                         instance.name, instance.memory)
2801

    
2802
    # check bridge existence
2803
    brlist = [nic.bridge for nic in instance.nics]
2804
    if not rpc.call_bridges_exist(target_node, brlist):
2805
      raise errors.OpPrereqError("One or more target bridges %s does not"
2806
                                 " exist on destination node '%s'" %
2807
                                 (brlist, target_node))
2808

    
2809
  def Exec(self, feedback_fn):
2810
    """Failover an instance.
2811

2812
    The failover is done by shutting it down on its present node and
2813
    starting it on the secondary.
2814

2815
    """
2816
    instance = self.instance
2817

    
2818
    source_node = instance.primary_node
2819
    target_node = instance.secondary_nodes[0]
2820

    
2821
    feedback_fn("* checking disk consistency between source and target")
2822
    for dev in instance.disks:
2823
      # for drbd, these are drbd over lvm
2824
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
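        # a degraded disk only blocks the failover for a running instance,
        # unless ignore_consistency was requested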
2825
        if instance.status == "up" and not self.op.ignore_consistency:
2826
          raise errors.OpExecError("Disk %s is degraded on target node,"
2827
                                   " aborting failover." % dev.iv_name)
2828

    
2829
    feedback_fn("* shutting down instance on source node")
2830
    logger.Info("Shutting down instance %s on node %s" %
2831
                (instance.name, source_node))
2832

    
2833
    if not rpc.call_instance_shutdown(source_node, instance):
2834
      if self.op.ignore_consistency:
2835
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
2836
                     " anyway. Please make sure node %s is down"  %
2837
                     (instance.name, source_node, source_node))
2838
      else:
2839
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2840
                                 (instance.name, source_node))
2841

    
2842
    feedback_fn("* deactivating the instance's disks on source node")
2843
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
2844
      raise errors.OpExecError("Can't shut down the instance's disks.")
2845

    
2846
    instance.primary_node = target_node
2847
    # distribute new instance config to the other nodes
2848
    self.cfg.Update(instance)
2849

    
2850
    # Only start the instance if it's marked as up
2851
    if instance.status == "up":
2852
      feedback_fn("* activating the instance's disks on target node")
2853
      logger.Info("Starting instance %s on node %s" %
2854
                  (instance.name, target_node))
2855

    
2856
      disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
2857
                                               ignore_secondaries=True)
2858
      if not disks_ok:
2859
        _ShutdownInstanceDisks(instance, self.cfg)
2860
        raise errors.OpExecError("Can't activate the instance's disks")
2861

    
2862
      feedback_fn("* starting the instance on the target node")
2863
      if not rpc.call_instance_start(target_node, instance, None):
2864
        _ShutdownInstanceDisks(instance, self.cfg)
2865
        raise errors.OpExecError("Could not start instance %s on node %s." %
2866
                                 (instance.name, target_node))
2867

    
2868

    
2869
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
2870
  """Create a tree of block devices on the primary node.
2871

2872
  This always creates all devices.
2873

2874
  """
2875
  if device.children:
2876
    for child in device.children:
2877
      if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
2878
        return False
2879

    
2880
  cfg.SetDiskID(device, node)
2881
  new_id = rpc.call_blockdev_create(node, device, device.size,
2882
                                    instance.name, True, info)
2883
  if not new_id:
2884
    return False
2885
  if device.physical_id is None:
2886
    device.physical_id = new_id
2887
  return True
2888

    
2889

    
2890
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
2891
  """Create a tree of block devices on a secondary node.
2892

2893
  If this device type has to be created on secondaries, create it and
2894
  all its children.
2895

2896
  If not, just recurse to children keeping the same 'force' value.
2897

2898
  """
2899
  if device.CreateOnSecondary():
2900
    force = True
2901
  if device.children:
2902
    for child in device.children:
2903
      if not _CreateBlockDevOnSecondary(cfg, node, instance,
2904
                                        child, force, info):
2905
        return False
2906

    
2907
  if not force:
2908
    return True
2909
  cfg.SetDiskID(device, node)
2910
  new_id = rpc.call_blockdev_create(node, device, device.size,
2911
                                    instance.name, False, info)
2912
  if not new_id:
2913
    return False
2914
  if device.physical_id is None:
2915
    device.physical_id = new_id
2916
  return True
2917

    
2918

    
2919
def _GenerateUniqueNames(cfg, exts):
2920
  """Generate a suitable LV name.
2921

2922
  This will generate a logical volume name for the given instance.
2923

2924
  """
2925
  results = []
2926
  for val in exts:
2927
    new_id = cfg.GenerateUniqueID()
2928
    results.append("%s%s" % (new_id, val))
2929
  return results
2930

    
2931

    
2932
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name,
2933
                         p_minor, s_minor):
2934
  """Generate a drbd8 device complete with its children.
2935

2936
  """
2937
  port = cfg.AllocatePort()
2938
  vgname = cfg.GetVGName()
2939
  shared_secret = cfg.GenerateDRBDSecret()
2940
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
2941
                          logical_id=(vgname, names[0]))
2942
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
2943
                          logical_id=(vgname, names[1]))
2944
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
2945
                          logical_id=(primary, secondary, port,
2946
                                      p_minor, s_minor,
2947
                                      shared_secret),
2948
                          children=[dev_data, dev_meta],
2949
                          iv_name=iv_name)
2950
  return drbd_dev
2951

    
2952

    
2953
def _GenerateDiskTemplate(cfg, template_name,
2954
                          instance_name, primary_node,
2955
                          secondary_nodes, disk_sz, swap_sz,
2956
                          file_storage_dir, file_driver):
2957
  """Generate the entire disk layout for a given template type.
2958

2959
  """
2960
  #TODO: compute space requirements
2961

    
2962
  vgname = cfg.GetVGName()
2963
  if template_name == constants.DT_DISKLESS:
2964
    disks = []
2965
  elif template_name == constants.DT_PLAIN:
2966
    if len(secondary_nodes) != 0:
2967
      raise errors.ProgrammerError("Wrong template configuration")
2968

    
2969
    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
2970
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
2971
                           logical_id=(vgname, names[0]),
2972
                           iv_name = "sda")
2973
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
2974
                           logical_id=(vgname, names[1]),
2975
                           iv_name = "sdb")
2976
    disks = [sda_dev, sdb_dev]
2977
  elif template_name == constants.DT_DRBD8:
2978
    if len(secondary_nodes) != 1:
2979
      raise errors.ProgrammerError("Wrong template configuration")
2980
    remote_node = secondary_nodes[0]
2981
    (minor_pa, minor_pb,
2982
     minor_sa, minor_sb) = cfg.AllocateDRBDMinor(
2983
      [primary_node, primary_node, remote_node, remote_node], instance_name)
2984

    
2985
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
2986
                                       ".sdb_data", ".sdb_meta"])
2987
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
2988
                                        disk_sz, names[0:2], "sda",
2989
                                        minor_pa, minor_sa)
2990
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
2991
                                        swap_sz, names[2:4], "sdb",
2992
                                        minor_pb, minor_sb)
2993
    disks = [drbd_sda_dev, drbd_sdb_dev]
2994
  elif template_name == constants.DT_FILE:
2995
    if len(secondary_nodes) != 0:
2996
      raise errors.ProgrammerError("Wrong template configuration")
2997

    
2998
    file_sda_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk_sz,
2999
                                iv_name="sda", logical_id=(file_driver,
3000
                                "%s/sda" % file_storage_dir))
3001
    file_sdb_dev = objects.Disk(dev_type=constants.LD_FILE, size=swap_sz,
3002
                                iv_name="sdb", logical_id=(file_driver,
3003
                                "%s/sdb" % file_storage_dir))
3004
    disks = [file_sda_dev, file_sdb_dev]
3005
  else:
3006
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
3007
  return disks
3008

    
3009

    
3010
def _GetInstanceInfoText(instance):
3011
  """Compute that text that should be added to the disk's metadata.
3012

3013
  """
3014
  return "originstname+%s" % instance.name
3015

    
3016

    
3017
def _CreateDisks(cfg, instance):
3018
  """Create all disks for an instance.
3019

3020
  This abstracts away some work from AddInstance.
3021

3022
  Args:
3023
    instance: the instance object
3024

3025
  Returns:
3026
    True or False showing the success of the creation process
3027

3028
  """
3029
  info = _GetInstanceInfoText(instance)
3030

    
3031
  if instance.disk_template == constants.DT_FILE:
3032
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3033
    result = rpc.call_file_storage_dir_create(instance.primary_node,
3034
                                              file_storage_dir)
3035

    
3036
    if not result:
3037
      logger.Error("Could not connect to node '%s'" % instance.primary_node)
3038
      return False
3039

    
3040
    if not result[0]:
3041
      logger.Error("failed to create directory '%s'" % file_storage_dir)
3042
      return False
3043

    
3044
  for device in instance.disks:
3045
    logger.Info("creating volume %s for instance %s" %
3046
                (device.iv_name, instance.name))
3047
    #HARDCODE
3048
    for secondary_node in instance.secondary_nodes:
3049
      if not _CreateBlockDevOnSecondary(cfg, secondary_node, instance,
3050
                                        device, False, info):
3051
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
3052
                     (device.iv_name, device, secondary_node))
3053
        return False
3054
    #HARDCODE
3055
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
3056
                                    instance, device, info):
3057
      logger.Error("failed to create volume %s on primary!" %
3058
                   device.iv_name)
3059
      return False
3060

    
3061
  return True
3062

    
3063

    
3064
def _RemoveDisks(instance, cfg):
3065
  """Remove all disks for an instance.
3066

3067
  This abstracts away some work from `AddInstance()` and
3068
  `RemoveInstance()`. Note that in case some of the devices couldn't
3069
  be removed, the removal will continue with the other ones (compare
3070
  with `_CreateDisks()`).
3071

3072
  Args:
3073
    instance: the instance object
3074

3075
  Returns:
3076
    True or False showing the success of the removal proces
3077

3078
  """
3079
  logger.Info("removing block devices for instance %s" % instance.name)
3080

    
3081
  result = True
3082
  for device in instance.disks:
3083
    for node, disk in device.ComputeNodeTree(instance.primary_node):
3084
      cfg.SetDiskID(disk, node)
3085
      if not rpc.call_blockdev_remove(node, disk):
3086
        logger.Error("could not remove block device %s on node %s,"
3087
                     " continuing anyway" %
3088
                     (device.iv_name, node))
3089
        result = False
3090

    
3091
  if instance.disk_template == constants.DT_FILE:
3092
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3093
    if not rpc.call_file_storage_dir_remove(instance.primary_node,
3094
                                            file_storage_dir):
3095
      logger.Error("could not remove directory '%s'" % file_storage_dir)
3096
      result = False
3097

    
3098
  return result
3099

    
3100

    
3101
def _ComputeDiskSize(disk_template, disk_size, swap_size):
3102
  """Compute disk size requirements in the volume group
3103

3104
  This is currently hard-coded for the two-drive layout.
3105

3106
  """
3107
  # Required free disk space as a function of disk and swap space
3108
  req_size_dict = {
3109
    constants.DT_DISKLESS: None,
3110
    constants.DT_PLAIN: disk_size + swap_size,
3111
    # 256 MB are added for drbd metadata, 128MB for each drbd device
3112
    constants.DT_DRBD8: disk_size + swap_size + 256,
3113
    constants.DT_FILE: None,
3114
  }
3115

    
3116
  if disk_template not in req_size_dict:
3117
    raise errors.ProgrammerError("Disk template '%s' size requirement"
3118
                                 " is unknown" %  disk_template)
3119

    
3120
  return req_size_dict[disk_template]
3121

    
3122

    
3123
class LUCreateInstance(LogicalUnit):
3124
  """Create an instance.
3125

3126
  """
3127
  HPATH = "instance-add"
3128
  HTYPE = constants.HTYPE_INSTANCE
3129
  _OP_REQP = ["instance_name", "mem_size", "disk_size",
3130
              "disk_template", "swap_size", "mode", "start", "vcpus",
3131
              "wait_for_sync", "ip_check", "mac"]
3132
  REQ_BGL = False
3133

    
3134
  def _ExpandNode(self, node):
3135
    """Expands and checks one node name.
3136

3137
    """
3138
    node_full = self.cfg.ExpandNodeName(node)
3139
    if node_full is None:
3140
      raise errors.OpPrereqError("Unknown node %s" % node)
3141
    return node_full
3142

    
3143
  def ExpandNames(self):
3144
    """ExpandNames for CreateInstance.
3145

3146
    Figure out the right locks for instance creation.
3147

3148
    """
3149
    self.needed_locks = {}
3150

    
3151
    # set optional parameters to none if they don't exist
3152
    for attr in ["kernel_path", "initrd_path", "pnode", "snode",
3153
                 "iallocator", "hvm_boot_order", "hvm_acpi", "hvm_pae",
3154
                 "hvm_cdrom_image_path", "hvm_nic_type", "hvm_disk_type",
3155
                 "vnc_bind_address"]:
3156
      if not hasattr(self.op, attr):
3157
        setattr(self.op, attr, None)
3158

    
3159
    # verify creation mode
3160
    if self.op.mode not in (constants.INSTANCE_CREATE,
3161
                            constants.INSTANCE_IMPORT):
3162
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
3163
                                 self.op.mode)
3164
    # disk template and mirror node verification
3165
    if self.op.disk_template not in constants.DISK_TEMPLATES:
3166
      raise errors.OpPrereqError("Invalid disk template name")
3167

    
3168
    #### instance parameters check
3169

    
3170
    # instance name verification
3171
    hostname1 = utils.HostInfo(self.op.instance_name)
3172
    self.op.instance_name = instance_name = hostname1.name
3173

    
3174
    # this is just a preventive check, but someone might still add this
3175
    # instance in the meantime, and creation will fail at lock-add time
3176
    if instance_name in self.cfg.GetInstanceList():
3177
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3178
                                 instance_name)
3179

    
3180
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
3181

    
3182
    # ip validity checks
3183
    ip = getattr(self.op, "ip", None)
3184
    if ip is None or ip.lower() == "none":
3185
      inst_ip = None
3186
    elif ip.lower() == "auto":
3187
      inst_ip = hostname1.ip
3188
    else:
3189
      if not utils.IsValidIP(ip):
3190
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
3191
                                   " like a valid IP" % ip)
3192
      inst_ip = ip
3193
    self.inst_ip = self.op.ip = inst_ip
3194
    # used in CheckPrereq for ip ping check
3195
    self.check_ip = hostname1.ip
3196

    
3197
    # MAC address verification
3198
    if self.op.mac != "auto":
3199
      if not utils.IsValidMac(self.op.mac.lower()):
3200
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
3201
                                   self.op.mac)
3202

    
3203
    # boot order verification
3204
    if self.op.hvm_boot_order is not None:
3205
      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
3206
        raise errors.OpPrereqError("invalid boot order specified,"
3207
                                   " must be one or more of [acdn]")
3208
    # file storage checks
3209
    if (self.op.file_driver and
3210
        not self.op.file_driver in constants.FILE_DRIVER):
3211
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
3212
                                 self.op.file_driver)
3213

    
3214
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
3215
      raise errors.OpPrereqError("File storage directory path not absolute")
3216

    
3217
    ### Node/iallocator related checks
3218
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
3219
      raise errors.OpPrereqError("One and only one of iallocator and primary"
3220
                                 " node must be given")
3221

    
3222
    if self.op.iallocator:
3223
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3224
    else:
3225
      self.op.pnode = self._ExpandNode(self.op.pnode)
3226
      nodelist = [self.op.pnode]
3227
      if self.op.snode is not None:
3228
        self.op.snode = self._ExpandNode(self.op.snode)
3229
        nodelist.append(self.op.snode)
3230
      self.needed_locks[locking.LEVEL_NODE] = nodelist
3231

    
3232
    # in case of import lock the source node too
3233
    if self.op.mode == constants.INSTANCE_IMPORT:
3234
      src_node = getattr(self.op, "src_node", None)
3235
      src_path = getattr(self.op, "src_path", None)
3236

    
3237
      if src_node is None or src_path is None:
3238
        raise errors.OpPrereqError("Importing an instance requires source"
3239
                                   " node and path options")
3240

    
3241
      if not os.path.isabs(src_path):
3242
        raise errors.OpPrereqError("The source path must be absolute")
3243

    
3244
      self.op.src_node = src_node = self._ExpandNode(src_node)
3245
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
3246
        self.needed_locks[locking.LEVEL_NODE].append(src_node)
3247

    
3248
    else: # INSTANCE_CREATE
3249
      if getattr(self.op, "os_type", None) is None:
3250
        raise errors.OpPrereqError("No guest OS specified")
3251

    
3252
  def _RunAllocator(self):
3253
    """Run the allocator based on input opcode.
3254

3255
    """
3256
    disks = [{"size": self.op.disk_size, "mode": "w"},
3257
             {"size": self.op.swap_size, "mode": "w"}]
3258
    nics = [{"mac": self.op.mac, "ip": getattr(self.op, "ip", None),
3259
             "bridge": self.op.bridge}]
3260
    ial = IAllocator(self.cfg, self.sstore,
3261
                     mode=constants.IALLOCATOR_MODE_ALLOC,
3262
                     name=self.op.instance_name,
3263
                     disk_template=self.op.disk_template,
3264
                     tags=[],
3265
                     os=self.op.os_type,
3266
                     vcpus=self.op.vcpus,
3267
                     mem_size=self.op.mem_size,
3268
                     disks=disks,
3269
                     nics=nics,
3270
                     )
3271

    
3272
    ial.Run(self.op.iallocator)
3273

    
3274
    if not ial.success:
3275
      raise errors.OpPrereqError("Can't compute nodes using"
3276
                                 " iallocator '%s': %s" % (self.op.iallocator,
3277
                                                           ial.info))
3278
    if len(ial.nodes) != ial.required_nodes:
3279
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3280
                                 " of nodes (%s), required %s" %
3281
                                 (self.op.iallocator, len(ial.nodes),
3282
                                  ial.required_nodes))
3283
    self.op.pnode = ial.nodes[0]
3284
    logger.ToStdout("Selected nodes for the instance: %s" %
3285
                    (", ".join(ial.nodes),))
3286
    logger.Info("Selected nodes for instance %s via iallocator %s: %s" %
3287
                (self.op.instance_name, self.op.iallocator, ial.nodes))
3288
    if ial.required_nodes == 2:
3289
      self.op.snode = ial.nodes[1]
3290

    
3291
  def BuildHooksEnv(self):
3292
    """Build hooks env.
3293

3294
    This runs on master, primary and secondary nodes of the instance.
3295

3296
    """
3297
    env = {
3298
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
3299
      "INSTANCE_DISK_SIZE": self.op.disk_size,
3300
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
3301
      "INSTANCE_ADD_MODE": self.op.mode,
3302
      }
3303
    if self.op.mode == constants.INSTANCE_IMPORT:
3304
      env["INSTANCE_SRC_NODE"] = self.op.src_node
3305
      env["INSTANCE_SRC_PATH"] = self.op.src_path
3306
      env["INSTANCE_SRC_IMAGE"] = self.src_image
3307

    
3308
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
3309
      primary_node=self.op.pnode,
3310
      secondary_nodes=self.secondaries,
3311
      status=self.instance_status,
3312
      os_type=self.op.os_type,
3313
      memory=self.op.mem_size,
3314
      vcpus=self.op.vcpus,
3315
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
3316
    ))
3317

    
3318
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
3319
          self.secondaries)
3320
    return env, nl, nl
3321

    
3322

    
3323
  def CheckPrereq(self):
3324
    """Check prerequisites.
3325

3326
    """
3327
    if (not self.cfg.GetVGName() and
3328
        self.op.disk_template not in constants.DTS_NOT_LVM):
3329
      raise errors.OpPrereqError("Cluster does not support lvm-based"
3330
                                 " instances")
3331

    
3332
    if self.op.mode == constants.INSTANCE_IMPORT:
3333
      src_node = self.op.src_node
3334
      src_path = self.op.src_path
3335

    
3336
      export_info = rpc.call_export_info(src_node, src_path)
3337

    
3338
      if not export_info:
3339
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
3340

    
3341
      if not export_info.has_section(constants.INISECT_EXP):
3342
        raise errors.ProgrammerError("Corrupted export config")
3343

    
3344
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
3345
      if (int(ei_version) != constants.EXPORT_VERSION):
3346
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
3347
                                   (ei_version, constants.EXPORT_VERSION))
3348

    
3349
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
3350
        raise errors.OpPrereqError("Can't import instance with more than"
3351
                                   " one data disk")
3352

    
3353
      # FIXME: are the old os-es, disk sizes, etc. useful?
3354
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
3355
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
3356
                                                         'disk0_dump'))
3357
      self.src_image = diskimage
3358

    
3359
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
3360

    
3361
    if self.op.start and not self.op.ip_check:
3362
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
3363
                                 " adding an instance in start mode")
3364

    
3365
    if self.op.ip_check:
3366
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
3367
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3368
                                   (self.check_ip, instance_name))
3369

    
3370
    # bridge verification
3371
    bridge = getattr(self.op, "bridge", None)
3372
    if bridge is None:
3373
      self.op.bridge = self.cfg.GetDefBridge()
3374
    else:
3375
      self.op.bridge = bridge
3376

    
3377
    #### allocator run
3378

    
3379
    if self.op.iallocator is not None:
3380
      self._RunAllocator()
3381

    
3382
    #### node related checks
3383

    
3384
    # check primary node
3385
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
3386
    assert self.pnode is not None, \
3387
      "Cannot retrieve locked node %s" % self.op.pnode
3388
    self.secondaries = []
3389

    
3390
    # mirror node verification
3391
    if self.op.disk_template in constants.DTS_NET_MIRROR:
3392
      if self.op.snode is None:
3393
        raise errors.OpPrereqError("The networked disk templates need"
3394
                                   " a mirror node")
3395
      if self.op.snode == pnode.name:
3396
        raise errors.OpPrereqError("The secondary node cannot be"
3397
                                   " the primary node.")
3398
      self.secondaries.append(self.op.snode)
3399

    
3400
    req_size = _ComputeDiskSize(self.op.disk_template,
3401
                                self.op.disk_size, self.op.swap_size)
3402

    
3403
    # Check lv size requirements
3404
    if req_size is not None:
3405
      nodenames = [pnode.name] + self.secondaries
3406
      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
3407
      for node in nodenames:
3408
        info = nodeinfo.get(node, None)
3409
        if not info:
3410
          raise errors.OpPrereqError("Cannot get current information"
3411
                                     " from node '%s'" % node)
3412
        vg_free = info.get('vg_free', None)
3413
        if not isinstance(vg_free, int):
3414
          raise errors.OpPrereqError("Can't compute free disk space on"
3415
                                     " node %s" % node)
3416
        if req_size > info['vg_free']:
3417
          raise errors.OpPrereqError("Not enough disk space on target node %s."
3418
                                     " %d MB available, %d MB required" %
3419
                                     (node, info['vg_free'], req_size))
3420

    
3421
    # os verification
3422
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
3423
    if not os_obj:
3424
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
3425
                                 " primary node"  % self.op.os_type)
3426

    
3427
    if self.op.kernel_path == constants.VALUE_NONE:
3428
      raise errors.OpPrereqError("Can't set instance kernel to none")
3429

    
3430
    # bridge check on primary node
3431
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
3432
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
3433
                                 " destination node '%s'" %
3434
                                 (self.op.bridge, pnode.name))
3435

    
3436
    # memory check on primary node
3437
    if self.op.start:
3438
      _CheckNodeFreeMemory(self.cfg, self.pnode.name,
3439
                           "creating instance %s" % self.op.instance_name,
3440
                           self.op.mem_size)
3441

    
3442
    # hvm_cdrom_image_path verification
3443
    if self.op.hvm_cdrom_image_path is not None:
3444
      # FIXME (als): shouldn't these checks happen on the destination node?
3445
      if not os.path.isabs(self.op.hvm_cdrom_image_path):
3446
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
3447
                                   " be an absolute path or None, not %s" %
3448
                                   self.op.hvm_cdrom_image_path)
3449
      if not os.path.isfile(self.op.hvm_cdrom_image_path):
3450
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
3451
                                   " regular file or a symlink pointing to"
3452
                                   " an existing regular file, not %s" %
3453
                                   self.op.hvm_cdrom_image_path)
3454

    
3455
    # vnc_bind_address verification
3456
    if self.op.vnc_bind_address is not None:
3457
      if not utils.IsValidIP(self.op.vnc_bind_address):
3458
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
3459
                                   " like a valid IP address" %
3460
                                   self.op.vnc_bind_address)
3461

    
3462
    # Xen HVM device type checks
3463
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
3464
      if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
3465
        raise errors.OpPrereqError("Invalid NIC type %s specified for Xen HVM"
3466
                                   " hypervisor" % self.op.hvm_nic_type)
3467
      if self.op.hvm_disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
3468
        raise errors.OpPrereqError("Invalid disk type %s specified for Xen HVM"
3469
                                   " hypervisor" % self.op.hvm_disk_type)
3470

    
3471
    if self.op.start:
3472
      self.instance_status = 'up'
3473
    else:
3474
      self.instance_status = 'down'
3475

    
3476
  def Exec(self, feedback_fn):
3477
    """Create and add the instance to the cluster.
3478

3479
    """
3480
    instance = self.op.instance_name
3481
    pnode_name = self.pnode.name
3482

    
3483
    if self.op.mac == "auto":
3484
      mac_address = self.cfg.GenerateMAC()
3485
    else:
3486
      mac_address = self.op.mac
3487

    
3488
    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
3489
    if self.inst_ip is not None:
3490
      nic.ip = self.inst_ip
3491

    
3492
    ht_kind = self.sstore.GetHypervisorType()
3493
    if ht_kind in constants.HTS_REQ_PORT:
3494
      network_port = self.cfg.AllocatePort()
3495
    else:
3496
      network_port = None
3497

    
3498
    if self.op.vnc_bind_address is None:
3499
      self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
3500

    
3501
    # this is needed because os.path.join does not accept None arguments
3502
    if self.op.file_storage_dir is None:
3503
      string_file_storage_dir = ""
3504
    else:
3505
      string_file_storage_dir = self.op.file_storage_dir
3506

    
3507
    # build the full file storage dir path
3508
    file_storage_dir = os.path.normpath(os.path.join(
3509
                                        self.sstore.GetFileStorageDir(),
3510
                                        string_file_storage_dir, instance))
3511

    
3512

    
3513
    disks = _GenerateDiskTemplate(self.cfg,
3514
                                  self.op.disk_template,
3515
                                  instance, pnode_name,
3516
                                  self.secondaries, self.op.disk_size,
3517
                                  self.op.swap_size,
3518
                                  file_storage_dir,
3519
                                  self.op.file_driver)
3520

    
3521
    iobj = objects.Instance(name=instance, os=self.op.os_type,
3522
                            primary_node=pnode_name,
3523
                            memory=self.op.mem_size,
3524
                            vcpus=self.op.vcpus,
3525
                            nics=[nic], disks=disks,
3526
                            disk_template=self.op.disk_template,
3527
                            status=self.instance_status,
3528
                            network_port=network_port,
3529
                            kernel_path=self.op.kernel_path,
3530
                            initrd_path=self.op.initrd_path,
3531
                            hvm_boot_order=self.op.hvm_boot_order,
3532
                            hvm_acpi=self.op.hvm_acpi,
3533
                            hvm_pae=self.op.hvm_pae,
3534
                            hvm_cdrom_image_path=self.op.hvm_cdrom_image_path,
3535
                            vnc_bind_address=self.op.vnc_bind_address,
3536
                            hvm_nic_type=self.op.hvm_nic_type,
3537
                            hvm_disk_type=self.op.hvm_disk_type,
3538
                            )
3539

    
3540
    feedback_fn("* creating instance disks...")
3541
    if not _CreateDisks(self.cfg, iobj):
3542
      _RemoveDisks(iobj, self.cfg)
3543
      self.cfg.ReleaseDRBDMinors(instance)
3544
      raise errors.OpExecError("Device creation failed, reverting...")
3545

    
3546
    feedback_fn("adding instance %s to cluster config" % instance)
3547

    
3548
    self.cfg.AddInstance(iobj)
3549
    # Declare that we don't want to remove the instance lock anymore, as we've
3550
    # added the instance to the config
3551
    del self.remove_locks[locking.LEVEL_INSTANCE]
3552
    # Remove the temp. assignements for the instance's drbds
3553
    self.cfg.ReleaseDRBDMinors(instance)
3554

    
3555
    if self.op.wait_for_sync:
3556
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
3557
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
3558
      # make sure the disks are not degraded (still sync-ing is ok)
3559
      time.sleep(15)
3560
      feedback_fn("* checking mirrors status")
3561
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
3562
    else:
3563
      disk_abort = False
3564

    
3565
    if disk_abort:
3566
      _RemoveDisks(iobj, self.cfg)
3567
      self.cfg.RemoveInstance(iobj.name)
3568
      # Make sure the instance lock gets removed
3569
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
3570
      raise errors.OpExecError("There are some degraded disks for"
3571
                               " this instance")
3572

    
3573
    feedback_fn("creating os for instance %s on node %s" %
3574
                (instance, pnode_name))
3575

    
3576
    if iobj.disk_template != constants.DT_DISKLESS:
3577
      if self.op.mode == constants.INSTANCE_CREATE:
3578
        feedback_fn("* running the instance OS create scripts...")
3579
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
3580
          raise errors.OpExecError("could not add os for instance %s"
3581
                                   " on node %s" %
3582
                                   (instance, pnode_name))
3583

    
3584
      elif self.op.mode == constants.INSTANCE_IMPORT:
3585
        feedback_fn("* running the instance OS import scripts...")
3586
        src_node = self.op.src_node
3587
        src_image = self.src_image
3588
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
3589
                                                src_node, src_image):
3590
          raise errors.OpExecError("Could not import os for instance"
3591
                                   " %s on node %s" %
3592
                                   (instance, pnode_name))
3593
      else:
3594
        # also checked in the prereq part
3595
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
3596
                                     % self.op.mode)
3597

    
3598
    if self.op.start:
3599
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
3600
      feedback_fn("* starting instance...")
3601
      if not rpc.call_instance_start(pnode_name, iobj, None):
3602
        raise errors.OpExecError("Could not start instance")
3603

    
3604

    
3605
class LUConnectConsole(NoHooksLU):
3606
  """Connect to an instance's console.
3607

3608
  This is somewhat special in that it returns the command line that
3609
  you need to run on the master node in order to connect to the
3610
  console.
3611

3612
  """
3613
  _OP_REQP = ["instance_name"]
3614
  REQ_BGL = False
3615

    
3616
  def ExpandNames(self):
3617
    self._ExpandAndLockInstance()
3618

    
3619
  def CheckPrereq(self):
3620
    """Check prerequisites.
3621

3622
    This checks that the instance is in the cluster.
3623

3624
    """
3625
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3626
    assert self.instance is not None, \
3627
      "Cannot retrieve locked instance %s" % self.op.instance_name
3628

    
3629
  def Exec(self, feedback_fn):
3630
    """Connect to the console of an instance
3631

3632
    """
3633
    instance = self.instance
3634
    node = instance.primary_node
3635

    
3636
    node_insts = rpc.call_instance_list([node])[node]
3637
    if node_insts is False:
3638
      raise errors.OpExecError("Can't connect to node %s." % node)
3639

    
3640
    if instance.name not in node_insts:
3641
      raise errors.OpExecError("Instance %s is not running." % instance.name)
3642

    
3643
    logger.Debug("connecting to console of %s on %s" % (instance.name, node))
3644

    
3645
    hyper = hypervisor.GetHypervisor()
3646
    console_cmd = hyper.GetShellCommandForConsole(instance)
3647

    
3648
    # build ssh cmdline
3649
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
3650

    
3651

    
3652
class LUReplaceDisks(LogicalUnit):
3653
  """Replace the disks of an instance.
3654

3655
  """
3656
  HPATH = "mirrors-replace"
3657
  HTYPE = constants.HTYPE_INSTANCE
3658
  _OP_REQP = ["instance_name", "mode", "disks"]
3659
  REQ_BGL = False
3660

    
3661
  def ExpandNames(self):
3662
    self._ExpandAndLockInstance()
3663

    
3664
    if not hasattr(self.op, "remote_node"):
3665
      self.op.remote_node = None
3666

    
3667
    ia_name = getattr(self.op, "iallocator", None)
3668
    if ia_name is not None:
3669
      if self.op.remote_node is not None:
3670
        raise errors.OpPrereqError("Give either the iallocator or the new"
3671
                                   " secondary, not both")
3672
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3673
    elif self.op.remote_node is not None:
3674
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
3675
      if remote_node is None:
3676
        raise errors.OpPrereqError("Node '%s' not known" %
3677
                                   self.op.remote_node)
3678
      self.op.remote_node = remote_node
3679
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
3680
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3681
    else:
3682
      self.needed_locks[locking.LEVEL_NODE] = []
3683
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3684

    
3685
  def DeclareLocks(self, level):
3686
    # If we're not already locking all nodes in the set we have to declare the
3687
    # instance's primary/secondary nodes.
3688
    if (level == locking.LEVEL_NODE and
3689
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
3690
      self._LockInstancesNodes()
3691

    
3692
  def _RunAllocator(self):
3693
    """Compute a new secondary node using an IAllocator.
3694

3695
    """
3696
    ial = IAllocator(self.cfg, self.sstore,
3697
                     mode=constants.IALLOCATOR_MODE_RELOC,
3698
                     name=self.op.instance_name,
3699
                     relocate_from=[self.sec_node])
3700

    
3701
    ial.Run(self.op.iallocator)
3702

    
3703
    if not ial.success:
3704
      raise errors.OpPrereqError("Can't compute nodes using"
3705
                                 " iallocator '%s': %s" % (self.op.iallocator,
3706
                                                           ial.info))
3707
    if len(ial.nodes) != ial.required_nodes:
3708
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3709
                                 " of nodes (%s), required %s" %
3710
                                 (len(ial.nodes), ial.required_nodes))
3711
    self.op.remote_node = ial.nodes[0]
3712
    logger.ToStdout("Selected new secondary for the instance: %s" %
3713
                    self.op.remote_node)
3714

    
3715
  def BuildHooksEnv(self):
3716
    """Build hooks env.
3717

3718
    This runs on the master, the primary and all the secondaries.
3719

3720
    """
3721
    env = {
3722
      "MODE": self.op.mode,
3723
      "NEW_SECONDARY": self.op.remote_node,
3724
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
3725
      }
3726
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3727
    nl = [
3728
      self.sstore.GetMasterNode(),
3729
      self.instance.primary_node,
3730
      ]
3731
    if self.op.remote_node is not None:
3732
      nl.append(self.op.remote_node)
3733
    return env, nl, nl
3734

    
3735
  def CheckPrereq(self):
3736
    """Check prerequisites.
3737

3738
    This checks that the instance is in the cluster.
3739

3740
    """
3741
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3742
    assert instance is not None, \
3743
      "Cannot retrieve locked instance %s" % self.op.instance_name
3744
    self.instance = instance
3745

    
3746
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3747
      raise errors.OpPrereqError("Instance's disk layout is not"
3748
                                 " network mirrored.")
3749

    
3750
    if len(instance.secondary_nodes) != 1:
3751
      raise errors.OpPrereqError("The instance has a strange layout,"
3752
                                 " expected one secondary but found %d" %
3753
                                 len(instance.secondary_nodes))
3754

    
3755
    self.sec_node = instance.secondary_nodes[0]
3756

    
3757
    ia_name = getattr(self.op, "iallocator", None)
3758
    if ia_name is not None:
3759
      self._RunAllocator()
3760

    
3761
    remote_node = self.op.remote_node
3762
    if remote_node is not None:
3763
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
3764
      assert self.remote_node_info is not None, \
3765
        "Cannot retrieve locked node %s" % remote_node
3766
    else:
3767
      self.remote_node_info = None
3768
    if remote_node == instance.primary_node:
3769
      raise errors.OpPrereqError("The specified node is the primary node of"
3770
                                 " the instance.")
3771
    elif remote_node == self.sec_node:
3772
      if self.op.mode == constants.REPLACE_DISK_SEC:
3773
        # this is for DRBD8, where we can't execute the same mode of
3774
        # replacement as for drbd7 (no different port allocated)
3775
        raise errors.OpPrereqError("Same secondary given, cannot execute"
3776
                                   " replacement")
3777
    if instance.disk_template == constants.DT_DRBD8:
3778
      if (self.op.mode == constants.REPLACE_DISK_ALL and
3779
          remote_node is not None):
3780
        # switch to replace secondary mode
3781
        self.op.mode = constants.REPLACE_DISK_SEC
3782

    
3783
      if self.op.mode == constants.REPLACE_DISK_ALL:
3784
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
3785
                                   " secondary disk replacement, not"
3786
                                   " both at once")
3787
      elif self.op.mode == constants.REPLACE_DISK_PRI:
3788
        if remote_node is not None:
3789
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
3790
                                     " the secondary while doing a primary"
3791
                                     " node disk replacement")
3792
        self.tgt_node = instance.primary_node
3793
        self.oth_node = instance.secondary_nodes[0]
3794
      elif self.op.mode == constants.REPLACE_DISK_SEC:
3795
        self.new_node = remote_node # this can be None, in which case
3796
                                    # we don't change the secondary
3797
        self.tgt_node = instance.secondary_nodes[0]
3798
        self.oth_node = instance.primary_node
3799
      else:
3800
        raise errors.ProgrammerError("Unhandled disk replace mode")
3801

    
3802
    for name in self.op.disks:
3803
      if instance.FindDisk(name) is None:
3804
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
3805
                                   (name, instance.name))
3806

    
3807
  def _ExecD8DiskOnly(self, feedback_fn):
3808
    """Replace a disk on the primary or secondary for dbrd8.
3809

3810
    The algorithm for replace is quite complicated:
3811
      - for each disk to be replaced:
3812
        - create new LVs on the target node with unique names
3813
        - detach old LVs from the drbd device
3814
        - rename old LVs to name_replaced.<time_t>
3815
        - rename new LVs to old LVs
3816
        - attach the new LVs (with the old names now) to the drbd device
3817
      - wait for sync across all devices
3818
      - for each modified disk:
3819
        - remove old LVs (which have the name name_replaces.<time_t>)
3820

3821
    Failures are not very well handled.
3822

3823
    """
3824
    steps_total = 6
3825
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3826
    instance = self.instance
3827
    iv_names = {}
3828
    vgname = self.cfg.GetVGName()
3829
    # start of work
3830
    cfg = self.cfg
3831
    tgt_node = self.tgt_node
3832
    oth_node = self.oth_node
3833

    
3834
    # Step: check device activation
3835
    self.proc.LogStep(1, steps_total, "check device existence")
3836
    info("checking volume groups")
3837
    my_vg = cfg.GetVGName()
3838
    results = rpc.call_vg_list([oth_node, tgt_node])
3839
    if not results:
3840
      raise errors.OpExecError("Can't list volume groups on the nodes")
3841
    for node in oth_node, tgt_node:
3842
      res = results.get(node, False)
3843
      if not res or my_vg not in res:
3844
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3845
                                 (my_vg, node))
3846
    for dev in instance.disks:
3847
      if not dev.iv_name in self.op.disks:
3848
        continue
3849
      for node in tgt_node, oth_node:
3850
        info("checking %s on %s" % (dev.iv_name, node))
3851
        cfg.SetDiskID(dev, node)
3852
        if not rpc.call_blockdev_find(node, dev):
3853
          raise errors.OpExecError("Can't find device %s on node %s" %
3854
                                   (dev.iv_name, node))
3855

    
3856
    # Step: check other node consistency
3857
    self.proc.LogStep(2, steps_total, "check peer consistency")
3858
    for dev in instance.disks:
3859
      if not dev.iv_name in self.op.disks:
3860
        continue
3861
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
3862
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
3863
                                   oth_node==instance.primary_node):
3864
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
3865
                                 " to replace disks on this node (%s)" %
3866
                                 (oth_node, tgt_node))
3867

    
3868
    # Step: create new storage
3869
    self.proc.LogStep(3, steps_total, "allocate new storage")
3870
    for dev in instance.disks:
3871
      if not dev.iv_name in self.op.disks:
3872
        continue
3873
      size = dev.size
3874
      cfg.SetDiskID(dev, tgt_node)
3875
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3876
      names = _GenerateUniqueNames(cfg, lv_names)
3877
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3878
                             logical_id=(vgname, names[0]))
3879
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3880
                             logical_id=(vgname, names[1]))
3881
      new_lvs = [lv_data, lv_meta]
3882
      old_lvs = dev.children
3883
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
3884
      info("creating new local storage on %s for %s" %
3885
           (tgt_node, dev.iv_name))
3886
      # since we *always* want to create this LV, we use the
3887
      # _Create...OnPrimary (which forces the creation), even if we
3888
      # are talking about the secondary node
3889
      for new_lv in new_lvs:
3890
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
3891
                                        _GetInstanceInfoText(instance)):
3892
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3893
                                   " node '%s'" %
3894
                                   (new_lv.logical_id[1], tgt_node))
3895

    
3896
    # Step: for each lv, detach+rename*2+attach
3897
    self.proc.LogStep(4, steps_total, "change drbd configuration")
3898
    for dev, old_lvs, new_lvs in iv_names.itervalues():
3899
      info("detaching %s drbd from local storage" % dev.iv_name)
3900
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
3901
        raise errors.OpExecError("Can't detach drbd from local storage on node"
3902
                                 " %s for device %s" % (tgt_node, dev.iv_name))
3903
      #dev.children = []
3904
      #cfg.Update(instance)
3905

    
3906
      # ok, we created the new LVs, so now we know we have the needed
3907
      # storage; as such, we proceed on the target node to rename
3908
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
3909
      # using the assumption that logical_id == physical_id (which in
3910
      # turn is the unique_id on that node)
3911

    
3912
      # FIXME(iustin): use a better name for the replaced LVs
3913
      temp_suffix = int(time.time())
3914
      ren_fn = lambda d, suff: (d.physical_id[0],
3915
                                d.physical_id[1] + "_replaced-%s" % suff)
3916
      # build the rename list based on what LVs exist on the node
3917
      rlist = []
3918
      for to_ren in old_lvs:
3919
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
3920
        if find_res is not None: # device exists
3921
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
3922

    
3923
      info("renaming the old LVs on the target node")
3924
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3925
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
3926
      # now we rename the new LVs to the old LVs
3927
      info("renaming the new LVs on the target node")
3928
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
3929
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3930
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
3931

    
3932
      for old, new in zip(old_lvs, new_lvs):
3933
        new.logical_id = old.logical_id
3934
        cfg.SetDiskID(new, tgt_node)
3935

    
3936
      for disk in old_lvs:
3937
        disk.logical_id = ren_fn(disk, temp_suffix)
3938
        cfg.SetDiskID(disk, tgt_node)
3939

    
3940
      # now that the new lvs have the old name, we can add them to the device
3941
      info("adding new mirror component on %s" % tgt_node)
3942
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
3943
        for new_lv in new_lvs:
3944
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
3945
            warning("Can't rollback device %s", hint="manually cleanup unused"
3946
                    " logical volumes")
3947
        raise errors.OpExecError("Can't add local storage to drbd")
3948

    
3949
      dev.children = new_lvs
3950
      cfg.Update(instance)
3951

    
3952
    # Step: wait for sync
3953

    
3954
    # this can fail as the old devices are degraded and _WaitForSync
3955
    # does a combined result over all disks, so we don't check its
3956
    # return value
3957
    self.proc.LogStep(5, steps_total, "sync devices")
3958
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3959

    
3960
    # so check manually all the devices
3961
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3962
      cfg.SetDiskID(dev, instance.primary_node)
3963
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
3964
      if is_degr:
3965
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3966

    
3967
    # Step: remove old storage
3968
    self.proc.LogStep(6, steps_total, "removing old storage")
3969
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3970
      info("remove logical volumes for %s" % name)
3971
      for lv in old_lvs:
3972
        cfg.SetDiskID(lv, tgt_node)
3973
        if not rpc.call_blockdev_remove(tgt_node, lv):
3974
          warning("Can't remove old LV", hint="manually remove unused LVs")
3975
          continue
3976

    
3977
  def _ExecD8Secondary(self, feedback_fn):
3978
    """Replace the secondary node for drbd8.
3979

3980
    The algorithm for replace is quite complicated:
3981
      - for all disks of the instance:
3982
        - create new LVs on the new node with same names
3983
        - shutdown the drbd device on the old secondary
3984
        - disconnect the drbd network on the primary
3985
        - create the drbd device on the new secondary
3986
        - network attach the drbd on the primary, using an artifice:
3987
          the drbd code for Attach() will connect to the network if it
3988
          finds a device which is connected to the good local disks but
3989
          not network enabled
3990
      - wait for sync across all devices
3991
      - remove all disks from the old secondary
3992

3993
    Failures are not very well handled.
3994

3995
    """
3996
    steps_total = 6
3997
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3998
    instance = self.instance
3999
    iv_names = {}
4000
    vgname = self.cfg.GetVGName()
4001
    # start of work
4002
    cfg = self.cfg
4003
    old_node = self.tgt_node
4004
    new_node = self.new_node
4005
    pri_node = instance.primary_node
4006

    
4007
    # Step: check device activation
4008
    self.proc.LogStep(1, steps_total, "check device existence")
4009
    info("checking volume groups")
4010
    my_vg = cfg.GetVGName()
4011
    results = rpc.call_vg_list([pri_node, new_node])
4012
    if not results:
4013
      raise errors.OpExecError("Can't list volume groups on the nodes")
4014
    for node in pri_node, new_node:
4015
      res = results.get(node, False)
4016
      if not res or my_vg not in res:
4017
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4018
                                 (my_vg, node))
4019
    for dev in instance.disks:
4020
      if not dev.iv_name in self.op.disks:
4021
        continue
4022
      info("checking %s on %s" % (dev.iv_name, pri_node))
4023
      cfg.SetDiskID(dev, pri_node)
4024
      if not rpc.call_blockdev_find(pri_node, dev):
4025
        raise errors.OpExecError("Can't find device %s on node %s" %
4026
                                 (dev.iv_name, pri_node))
4027

    
4028
    # Step: check other node consistency
4029
    self.proc.LogStep(2, steps_total, "check peer consistency")
4030
    for dev in instance.disks:
4031
      if not dev.iv_name in self.op.disks:
4032
        continue
4033
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
4034
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
4035
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
4036
                                 " unsafe to replace the secondary" %
4037
                                 pri_node)
4038

    
4039
    # Step: create new storage
4040
    self.proc.LogStep(3, steps_total, "allocate new storage")
4041
    for dev in instance.disks:
4042
      size = dev.size
4043
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
4044
      # since we *always* want to create this LV, we use the
4045
      # _Create...OnPrimary (which forces the creation), even if we
4046
      # are talking about the secondary node
4047
      for new_lv in dev.children:
4048
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
4049
                                        _GetInstanceInfoText(instance)):
4050
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4051
                                   " node '%s'" %
4052
                                   (new_lv.logical_id[1], new_node))
4053

    
4054

    
4055
    # Step 4: dbrd minors and drbd setups changes
4056
    # after this, we must manually remove the drbd minors on both the
4057
    # error and the success paths
4058
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
4059
                                   instance.name)
4060
    logging.debug("Allocated minors %s" % (minors,))
4061
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
4062
    for dev, new_minor in zip(instance.disks, minors):
4063
      size = dev.size
4064
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
4065
      # create new devices on new_node
4066
      if pri_node == dev.logical_id[0]:
4067
        new_logical_id = (pri_node, new_node,
4068
                          dev.logical_id[2], dev.logical_id[3], new_minor,
4069
                          dev.logical_id[5])
4070
      else:
4071
        new_logical_id = (new_node, pri_node,
4072
                          dev.logical_id[2], new_minor, dev.logical_id[4],
4073
                          dev.logical_id[5])
4074
      iv_names[dev.iv_name] = (dev, dev.children, new_logical_id)
4075
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
4076
                    new_logical_id)
4077
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
4078
                              logical_id=new_logical_id,
4079
                              children=dev.children)
4080
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
4081
                                        new_drbd, False,
4082
                                      _GetInstanceInfoText(instance)):
4083
        self.cfg.ReleaseDRBDMinors(instance.name)
4084
        raise errors.OpExecError("Failed to create new DRBD on"
4085
                                 " node '%s'" % new_node)
4086

    
4087
    for dev in instance.disks:
4088
      # we have new devices, shutdown the drbd on the old secondary
4089
      info("shutting down drbd for %s on old node" % dev.iv_name)
4090
      cfg.SetDiskID(dev, old_node)
4091
      if not rpc.call_blockdev_shutdown(old_node, dev):
4092
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
4093
                hint="Please cleanup this device manually as soon as possible")
4094

    
4095
    info("detaching primary drbds from the network (=> standalone)")
4096
    done = 0
4097
    for dev in instance.disks:
4098
      cfg.SetDiskID(dev, pri_node)
4099
      # set the network part of the physical (unique in bdev terms) id
4100
      # to None, meaning detach from network
4101
      dev.physical_id = (None, None, None, None) + dev.physical_id[4:]
4102
      # and 'find' the device, which will 'fix' it to match the
4103
      # standalone state
4104
      if rpc.call_blockdev_find(pri_node, dev):
4105
        done += 1
4106
      else:
4107
        warning("Failed to detach drbd %s from network, unusual case" %
4108
                dev.iv_name)
4109

    
4110
    if not done:
4111
      # no detaches succeeded (very unlikely)
4112
      self.cfg.ReleaseDRBDMinors(instance.name)
4113
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
4114

    
4115
    # if we managed to detach at least one, we update all the disks of
4116
    # the instance to point to the new secondary
4117
    info("updating instance configuration")
4118
    for dev, _, new_logical_id in iv_names.itervalues():
4119
      dev.logical_id = new_logical_id
4120
      cfg.SetDiskID(dev, pri_node)
4121
    cfg.Update(instance)
4122
    # we can remove now the temp minors as now the new values are
4123
    # written to the config file (and therefore stable)
4124
    self.cfg.ReleaseDRBDMinors(instance.name)
4125

    
4126
    # and now perform the drbd attach
4127
    info("attaching primary drbds to new secondary (standalone => connected)")
4128
    failures = []
4129
    for dev in instance.disks:
4130
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
4131
      # since the attach is smart, it's enough to 'find' the device,
4132
      # it will automatically activate the network, if the physical_id
4133
      # is correct
4134
      cfg.SetDiskID(dev, pri_node)
4135
      logging.debug("Disk to attach: %s", dev)
4136
      if not rpc.call_blockdev_find(pri_node, dev):
4137
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
4138
                "please do a gnt-instance info to see the status of disks")
4139

    
4140
    # this can fail as the old devices are degraded and _WaitForSync
4141
    # does a combined result over all disks, so we don't check its
4142
    # return value
4143
    self.proc.LogStep(5, steps_total, "sync devices")
4144
    _WaitForSync(cfg, instance, self.proc, unlock=True)
4145

    
4146
    # so check manually all the devices
4147
    for name, (dev, old_lvs, _) in iv_names.iteritems():
4148
      cfg.SetDiskID(dev, pri_node)
4149
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
4150
      if is_degr:
4151
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4152

    
4153
    self.proc.LogStep(6, steps_total, "removing old storage")
4154
    for name, (dev, old_lvs, _) in iv_names.iteritems():
4155
      info("remove logical volumes for %s" % name)
4156
      for lv in old_lvs:
4157
        cfg.SetDiskID(lv, old_node)
4158
        if not rpc.call_blockdev_remove(old_node, lv):
4159
          warning("Can't remove LV on old secondary",
4160
                  hint="Cleanup stale volumes by hand")
4161

    
4162
  def Exec(self, feedback_fn):
4163
    """Execute disk replacement.
4164

4165
    This dispatches the disk replacement to the appropriate handler.
4166

4167
    """
4168
    instance = self.instance
4169

    
4170
    # Activate the instance disks if we're replacing them on a down instance
4171
    if instance.status == "down":
4172
      _StartInstanceDisks(self.cfg, instance, True)
4173

    
4174
    if instance.disk_template == constants.DT_DRBD8:
4175
      if self.op.remote_node is None:
4176
        fn = self._ExecD8DiskOnly
4177
      else:
4178
        fn = self._ExecD8Secondary
4179
    else:
4180
      raise errors.ProgrammerError("Unhandled disk replacement case")
4181

    
4182
    ret = fn(feedback_fn)
4183

    
4184
    # Deactivate the instance disks if we're replacing them on a down instance
4185
    if instance.status == "down":
4186
      _SafeShutdownInstanceDisks(instance, self.cfg)
4187

    
4188
    return ret
4189

    
4190

    
4191
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [
      self.sstore.GetMasterNode(),
      self.instance.primary_node,
      ]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    self.instance = instance

    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    if instance.FindDisk(self.op.disk) is None:
      raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                 (self.op.disk, instance.name))

    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
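    # As a rough illustration of how the result is used here and elsewhere in
    # this module, call_node_info is expected to map each node name to a dict
    # of node facts, e.g. (hostname and values hypothetical):
    #   {"node1.example.com": {"vg_free": 20480, "vg_size": 102400,
    #                          "memory_free": 2048, ...}}
    # Only 'vg_free' (in MiB) is consumed by the check below.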
    for node in nodenames:
      info = nodeinfo.get(node, None)
      if not info:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      vg_free = info.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > info['vg_free']:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, info['vg_free'], self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = instance.FindDisk(self.op.disk)
    for node in (instance.secondary_nodes + (instance.primary_node,)):
      self.cfg.SetDiskID(disk, node)
      result = rpc.call_blockdev_grow(node, disk, self.op.amount)
      if not result or not isinstance(result, (list, tuple)) or len(result) != 2:
        raise errors.OpExecError("grow request failed to node %s" % node)
      elif not result[0]:
        raise errors.OpExecError("grow request failed to node %s: %s" %
                                 (node, result[1]))
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    return


class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
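    # shared (value 1) rather than exclusive locks should be enough here,
    # since this LU only reads state and modifies nothing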

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    """
    self.cfg.SetDiskID(dev, instance.primary_node)
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data
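    # Illustrative shape of the dict built above (values hypothetical):
    #   {"iv_name": "sda", "dev_type": <dev.dev_type>, "logical_id": ...,
    #    "physical_id": ..., "pstatus": <call_blockdev_find result on the
    #    primary node>, "sstatus": <same on the secondary, or None>,
    #    "children": [<nested dicts of the same shape>]}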

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}
    for instance in self.wanted_instances:
      remote_info = rpc.call_instance_info(instance.primary_node,
                                           instance.name)
      if remote_info and "state" in remote_info:
        remote_state = "up"
      else:
        remote_state = "down"
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "vcpus": instance.vcpus,
        }

      htkind = self.sstore.GetHypervisorType()
      if htkind == constants.HT_XEN_PVM30:
        idict["kernel_path"] = instance.kernel_path
        idict["initrd_path"] = instance.initrd_path

      if htkind == constants.HT_XEN_HVM31:
        idict["hvm_boot_order"] = instance.hvm_boot_order
        idict["hvm_acpi"] = instance.hvm_acpi
        idict["hvm_pae"] = instance.hvm_pae
        idict["hvm_cdrom_image_path"] = instance.hvm_cdrom_image_path
        idict["hvm_nic_type"] = instance.hvm_nic_type
        idict["hvm_disk_type"] = instance.hvm_disk_type

      if htkind in constants.HTS_REQ_PORT:
        if instance.vnc_bind_address is None:
          vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
        else:
          vnc_bind_address = instance.vnc_bind_address
        if instance.network_port is None:
          vnc_console_port = None
        elif vnc_bind_address == constants.BIND_ADDRESS_GLOBAL:
          vnc_console_port = "%s:%s" % (instance.primary_node,
                                        instance.network_port)
        elif vnc_bind_address == constants.LOCALHOST_IP_ADDRESS:
          vnc_console_port = "%s:%s on node %s" % (vnc_bind_address,
                                                   instance.network_port,
                                                   instance.primary_node)
        else:
          vnc_console_port = "%s:%s" % (instance.vnc_bind_address,
                                        instance.network_port)
        idict["vnc_console_port"] = vnc_console_port
        idict["vnc_bind_address"] = vnc_bind_address
        idict["network_port"] = instance.network_port

      result[instance.name] = idict

    return result
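    # The result maps each instance name to the idict built above, e.g.
    # (names and values hypothetical):
    #   {"instance1.example.com": {"name": "instance1.example.com",
    #                              "config_state": "up", "run_state": "down",
    #                              "pnode": "node1.example.com", ...}}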


class LUSetInstanceParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = dict()
    if self.mem:
      args['memory'] = self.mem
    if self.vcpus:
      args['vcpus'] = self.vcpus
    if self.do_ip or self.do_bridge or self.mac:
      if self.do_ip:
        ip = self.ip
      else:
        ip = self.instance.nics[0].ip
      if self.bridge:
        bridge = self.bridge
      else:
        bridge = self.instance.nics[0].bridge
      if self.mac:
        mac = self.mac
      else:
        mac = self.instance.nics[0].mac
      args['nics'] = [(ip, bridge, mac)]
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    # FIXME: all the parameters could be checked before, in ExpandNames, or in
    # a separate CheckArguments function, if we implement one, so the operation
    # can be aborted without waiting for any lock, should it have an error...
    self.mem = getattr(self.op, "mem", None)
    self.vcpus = getattr(self.op, "vcpus", None)
    self.ip = getattr(self.op, "ip", None)
    self.mac = getattr(self.op, "mac", None)
    self.bridge = getattr(self.op, "bridge", None)
    self.kernel_path = getattr(self.op, "kernel_path", None)
    self.initrd_path = getattr(self.op, "initrd_path", None)
    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
    self.hvm_acpi = getattr(self.op, "hvm_acpi", None)
    self.hvm_pae = getattr(self.op, "hvm_pae", None)
    self.hvm_nic_type = getattr(self.op, "hvm_nic_type", None)
    self.hvm_disk_type = getattr(self.op, "hvm_disk_type", None)
    self.hvm_cdrom_image_path = getattr(self.op, "hvm_cdrom_image_path", None)
    self.vnc_bind_address = getattr(self.op, "vnc_bind_address", None)
    self.force = getattr(self.op, "force", None)
    all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
                 self.kernel_path, self.initrd_path, self.hvm_boot_order,
                 self.hvm_acpi, self.hvm_pae, self.hvm_cdrom_image_path,
                 self.vnc_bind_address, self.hvm_nic_type, self.hvm_disk_type]
    if all_parms.count(None) == len(all_parms):
      raise errors.OpPrereqError("No changes submitted")
    if self.mem is not None:
      try:
        self.mem = int(self.mem)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
    if self.vcpus is not None:
      try:
        self.vcpus = int(self.vcpus)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
    if self.ip is not None:
      self.do_ip = True
      if self.ip.lower() == "none":
        self.ip = None
      else:
        if not utils.IsValidIP(self.ip):
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
    else:
      self.do_ip = False
    self.do_bridge = (self.bridge is not None)
    if self.mac is not None:
      if self.cfg.IsMacInUse(self.mac):
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
                                   self.mac)
      if not utils.IsValidMac(self.mac):
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)

    if self.kernel_path is not None:
      self.do_kernel_path = True
      if self.kernel_path == constants.VALUE_NONE:
        raise errors.OpPrereqError("Can't set instance to no kernel")

      if self.kernel_path != constants.VALUE_DEFAULT:
        if not os.path.isabs(self.kernel_path):
          raise errors.OpPrereqError("The kernel path must be an absolute"
                                     " filename")
    else:
      self.do_kernel_path = False

    if self.initrd_path is not None:
      self.do_initrd_path = True
      if self.initrd_path not in (constants.VALUE_NONE,
                                  constants.VALUE_DEFAULT):
        if not os.path.isabs(self.initrd_path):
          raise errors.OpPrereqError("The initrd path must be an absolute"
                                     " filename")
    else:
      self.do_initrd_path = False

    # boot order verification
    if self.hvm_boot_order is not None:
      if self.hvm_boot_order != constants.VALUE_DEFAULT:
        if len(self.hvm_boot_order.strip("acdn")) != 0:
          raise errors.OpPrereqError("invalid boot order specified,"
                                     " must be one or more of [acdn]"
                                     " or 'default'")

    # hvm_cdrom_image_path verification
    if self.op.hvm_cdrom_image_path is not None:
      if not (os.path.isabs(self.op.hvm_cdrom_image_path) or
              self.op.hvm_cdrom_image_path.lower() == "none"):
        raise errors.OpPrereqError("The path to the HVM CDROM image must"
                                   " be an absolute path or None, not %s" %
                                   self.op.hvm_cdrom_image_path)
      if not (os.path.isfile(self.op.hvm_cdrom_image_path) or
              self.op.hvm_cdrom_image_path.lower() == "none"):
        raise errors.OpPrereqError("The HVM CDROM image must either be a"
                                   " regular file or a symlink pointing to"
                                   " an existing regular file, not %s" %
                                   self.op.hvm_cdrom_image_path)

    # vnc_bind_address verification
    if self.op.vnc_bind_address is not None:
      if not utils.IsValidIP(self.op.vnc_bind_address):
        raise errors.OpPrereqError("given VNC bind address '%s' doesn't look"
                                   " like a valid IP address" %
                                   self.op.vnc_bind_address)

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.warn = []
    if self.mem is not None and not self.force:
      pnode = self.instance.primary_node
      nodelist = [pnode]
      nodelist.extend(instance.secondary_nodes)
      instance_info = rpc.call_instance_info(pnode, instance.name)
      nodeinfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())

      if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s" % pnode)
      else:
        if instance_info:
          current_mem = instance_info['memory']
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
        miss_mem = self.mem - current_mem - nodeinfo[pnode]['memory_free']
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem)

      for node in instance.secondary_nodes:
        if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
          self.warn.append("Can't get info from secondary node %s" % node)
        elif self.mem > nodeinfo[node]['memory_free']:
          self.warn.append("Not enough memory to failover instance to"
                           " secondary node %s" % node)

    # Xen HVM device type checks
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      if self.op.hvm_nic_type is not None:
        if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
          raise errors.OpPrereqError("Invalid NIC type %s specified for Xen"
                                     " HVM hypervisor" % self.op.hvm_nic_type)
      if self.op.hvm_disk_type is not None:
        if self.op.hvm_disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
          raise errors.OpPrereqError("Invalid disk type %s specified for Xen"
                                     " HVM hypervisor" % self.op.hvm_disk_type)

    return

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    result = []
    instance = self.instance
    if self.mem:
      instance.memory = self.mem
      result.append(("mem", self.mem))
    if self.vcpus:
      instance.vcpus = self.vcpus
      result.append(("vcpus", self.vcpus))
    if self.do_ip:
      instance.nics[0].ip = self.ip
      result.append(("ip", self.ip))
    if self.bridge:
      instance.nics[0].bridge = self.bridge
      result.append(("bridge", self.bridge))
    if self.mac:
      instance.nics[0].mac = self.mac
      result.append(("mac", self.mac))
    if self.do_kernel_path:
      instance.kernel_path = self.kernel_path
      result.append(("kernel_path", self.kernel_path))
    if self.do_initrd_path:
      instance.initrd_path = self.initrd_path
      result.append(("initrd_path", self.initrd_path))
    if self.hvm_boot_order:
      if self.hvm_boot_order == constants.VALUE_DEFAULT:
        instance.hvm_boot_order = None
      else:
        instance.hvm_boot_order = self.hvm_boot_order
      result.append(("hvm_boot_order", self.hvm_boot_order))
    if self.hvm_acpi is not None:
      instance.hvm_acpi = self.hvm_acpi
      result.append(("hvm_acpi", self.hvm_acpi))
    if self.hvm_pae is not None:
      instance.hvm_pae = self.hvm_pae
      result.append(("hvm_pae", self.hvm_pae))
    if self.hvm_nic_type is not None:
      instance.hvm_nic_type = self.hvm_nic_type
      result.append(("hvm_nic_type", self.hvm_nic_type))
    if self.hvm_disk_type is not None:
      instance.hvm_disk_type = self.hvm_disk_type
      result.append(("hvm_disk_type", self.hvm_disk_type))
    if self.hvm_cdrom_image_path:
      if self.hvm_cdrom_image_path == constants.VALUE_NONE:
        instance.hvm_cdrom_image_path = None
      else:
        instance.hvm_cdrom_image_path = self.hvm_cdrom_image_path
      result.append(("hvm_cdrom_image_path", self.hvm_cdrom_image_path))
    if self.vnc_bind_address:
      instance.vnc_bind_address = self.vnc_bind_address
      result.append(("vnc_bind_address", self.vnc_bind_address))

    self.cfg.Update(instance)

    return result
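    # The returned list pairs each changed parameter with its new value, for
    # example (values hypothetical): [("mem", 512), ("vcpus", 2)]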


class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    return rpc.call_export_list(self.nodes)
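    # Illustrative result shape (hostnames hypothetical):
    #   {"node1.example.com": ["instance1.example.com",
    #                          "instance2.example.com"]}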


class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have to lock all nodes, as we don't know where
    # the previous export might be, and in this LU we search for it and
    # remove it from its current node. In the future we could fix this by:
    #  - making a tasklet to search (share-lock all), then create the new one,
    #    then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    assert self.dst_node is not None, \
          "Cannot retrieve locked node %s" % self.op.target_node

    # instance disk type verification
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      if not rpc.call_instance_shutdown(src_node, instance):
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, src_node))

    vgname = self.cfg.GetVGName()

    snap_disks = []

    try:
      for disk in instance.disks:
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                   logical_id=(vgname, new_dev_name),
                                   physical_id=(vgname, new_dev_name),
                                   iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      if self.op.shutdown and instance.status == "up":
        if not rpc.call_instance_start(src_node, instance, None):
          _ShutdownInstanceDisks(instance, self.cfg)
          raise errors.OpExecError("Could not start instance")

    # TODO: check for size

    for dev in snap_disks:
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name, instance):
        logger.Error("could not export block device %s from node %s to node %s"
                     % (dev.logical_id[1], src_node, dst_node.name))
      if not rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from node %s" %
                     (dev.logical_id[1], src_node))

    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal;
    # if we proceed, the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = rpc.call_export_list(nodelist)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))


4865
  """Remove exports related to the named instance.
4866

4867
  """
4868
  _OP_REQP = ["instance_name"]
4869
  REQ_BGL = False
4870

    
4871
  def ExpandNames(self):
4872
    self.needed_locks = {}
4873
    # We need all nodes to be locked in order for RemoveExport to work, but we
4874
    # don't need to lock the instance itself, as nothing will happen to it (and
4875
    # we can remove exports also for a removed instance)
4876
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4877

    
4878
  def CheckPrereq(self):
4879
    """Check prerequisites.
4880
    """
4881
    pass
4882

    
4883
  def Exec(self, feedback_fn):
4884
    """Remove any export.
4885

4886
    """
4887
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
4888
    # If the instance was not found we'll try with the name that was passed in.
4889
    # This will only work if it was an FQDN, though.
4890
    fqdn_warn = False
4891
    if not instance_name:
4892
      fqdn_warn = True
4893
      instance_name = self.op.instance_name
4894

    
4895
    exportlist = rpc.call_export_list(self.acquired_locks[locking.LEVEL_NODE])
4896
    found = False
4897
    for node in exportlist:
4898
      if instance_name in exportlist[node]:
4899
        found = True
4900
        if not rpc.call_export_remove(node, instance_name):
4901
          logger.Error("could not remove export for instance %s"
4902
                       " on node %s" % (instance_name, node))
4903

    
4904
    if fqdn_warn and not found:
4905
      feedback_fn("Export not found. If trying to remove an export belonging"
4906
                  " to a deleted instance please use its Fully Qualified"
4907
                  " Domain Name.")
4908

    
4909

    
4910
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    self.needed_locks = {}
    if self.op.kind == constants.TAG_NODE:
      name = self.cfg.ExpandNodeName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.needed_locks[locking.LEVEL_NODE] = name
    elif self.op.kind == constants.TAG_INSTANCE:
      name = self.cfg.ExpandInstanceName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.needed_locks[locking.LEVEL_INSTANCE] = name

  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif self.op.kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif self.op.kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))


class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    return list(self.target.GetTags())


class LUSearchTags(NoHooksLU):
  """Searches the tags for a given pattern.

  """
  _OP_REQP = ["pattern"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the pattern passed for validity by compiling it.

    """
    try:
      self.re = re.compile(self.op.pattern)
    except re.error, err:
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
                                 (self.op.pattern, err))

  def Exec(self, feedback_fn):
    """Returns the (path, tag) pairs matching the search pattern.

    """
    cfg = self.cfg
    tgts = [("/cluster", cfg.GetClusterInfo())]
    ilist = cfg.GetAllInstancesInfo().values()
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
    nlist = cfg.GetAllNodesInfo().values()
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
    results = []
    for path, target in tgts:
      for tag in target.GetTags():
        if self.re.search(tag):
          results.append((path, tag))
    return results
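    # Illustrative result (the path prefixes come from the code above, the
    # tag values are hypothetical):
    #   [("/cluster", "production"),
    #    ("/instances/instance1.example.com", "web")]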


class LUAddTags(TagsLU):
  """Sets a tag on a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the type and length of the tag name and value.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

  def Exec(self, feedback_fn):
    """Sets the tag.

    """
    try:
      for tag in self.op.tags:
        self.target.AddTag(tag)
    except errors.TagError, err:
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")


class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    if not del_tags <= cur_tags:
      diff_tags = del_tags - cur_tags
      diff_names = ["'%s'" % tag for tag in diff_tags]
      diff_names.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")


class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not result:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, node_result in result.items():
        if not node_result:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, node_result))


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has the following sets of attributes:
    - cfg/sstore that are needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus",
    ]
  _RELO_KEYS = [
    "relocate_from",
    ]

  def __init__(self, cfg, sstore, mode, name, **kwargs):
    self.cfg = cfg
    self.sstore = sstore
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()
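  # Typical usage, following LUTestAllocator.Exec further below (the
  # allocator name and instance/node names are hypothetical):
  #   ial = IAllocator(cfg, sstore, mode=constants.IALLOCATOR_MODE_RELOC,
  #                    name="instance1.example.com",
  #                    relocate_from=["node2.example.com"])
  #   ial.Run("my-allocator")
  #   if ial.success:
  #     new_nodes = ial.nodes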

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    # cluster data
    data = {
      "version": 1,
      "cluster_name": self.sstore.GetClusterName(),
      "cluster_tags": list(cfg.GetClusterInfo().GetTags()),
      "hypervisor_type": self.sstore.GetHypervisorType(),
      # we don't have job IDs
      }

    i_list = [cfg.GetInstanceInfo(iname) for iname in cfg.GetInstanceList()]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()
    node_data = rpc.call_node_info(node_list, cfg.GetVGName())
    for nname in node_list:
      ninfo = cfg.GetNodeInfo(nname)
      if nname not in node_data or not isinstance(node_data[nname], dict):
        raise errors.OpExecError("Can't get data for node %s" % nname)
      remote_info = node_data[nname]
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
                   'vg_size', 'vg_free', 'cpu_total']:
        if attr not in remote_info:
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
                                   (nname, attr))
        try:
          remote_info[attr] = int(remote_info[attr])
        except ValueError, err:
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
                                   " %s" % (nname, attr, str(err)))
      # compute memory used by primary instances
      i_p_mem = i_p_up_mem = 0
      for iinfo in i_list:
        if iinfo.primary_node == nname:
          i_p_mem += iinfo.memory
          if iinfo.status == "up":
            i_p_up_mem += iinfo.memory

      # compute memory used by instances
      pnr = {
        "tags": list(ninfo.GetTags()),
        "total_memory": remote_info['memory_total'],
        "reserved_memory": remote_info['memory_dom0'],
        "free_memory": remote_info['memory_free'],
        "i_pri_memory": i_p_mem,
        "i_pri_up_memory": i_p_up_mem,
        "total_disk": remote_info['vg_size'],
        "free_disk": remote_info['vg_free'],
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "total_cpus": remote_info['cpu_total'],
        }
      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo in i_list:
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
                  for n in iinfo.nics]
      pir = {
        "tags": list(iinfo.GetTags()),
        "should_run": iinfo.status == "up",
        "vcpus": iinfo.vcpus,
        "memory": iinfo.memory,
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        }
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data
    if len(self.disks) != 2:
      raise errors.OpExecError("Only two-disk configurations supported")

    disk_space = _ComputeDiskSize(self.disk_template,
                                  self.disks[0]["size"], self.disks[1]["size"])

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    self.required_nodes = 1

    disk_space = _ComputeDiskSize(instance.disk_template,
                                  instance.disks[0].size,
                                  instance.disks[1].size)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

  def _BuildInputData(self):
    """Build input data structures.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=rpc.call_iallocator_runner):
    """Run an instance allocator and return the results.

    """
    data = self.in_text

    result = call_fn(self.sstore.GetMasterNode(), name, self.in_text)

    if not isinstance(result, (list, tuple)) or len(result) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict
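    # Once loaded via serializer.Load, the allocator's reply must therefore be
    # a dict of the form (values hypothetical):
    #   {"success": True, "info": "allocation successful",
    #    "nodes": ["node2.example.com", "node3.example.com"]}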


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      if len(self.op.disks) != 2:
        raise errors.OpPrereqError("Only two-disk configurations supported")
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.sstore,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       )
    else:
      ial = IAllocator(self.cfg, self.sstore,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result