Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 112f18a5

History | View | Annotate | Download (208.7 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module implementing the master-side code."""
23

    
24
# pylint: disable-msg=W0613,W0201
25

    
26
import os
27
import os.path
28
import sha
29
import time
30
import tempfile
31
import re
32
import platform
33
import logging
34
import copy
35
import random
36

    
37
from ganeti import ssh
38
from ganeti import utils
39
from ganeti import errors
40
from ganeti import hypervisor
41
from ganeti import locking
42
from ganeti import constants
43
from ganeti import objects
44
from ganeti import opcodes
45
from ganeti import serializer
46
from ganeti import ssconf
47

    
48

    
49
class LogicalUnit(object):
50
  """Logical Unit base class.
51

52
  Subclasses must follow these rules:
53
    - implement ExpandNames
54
    - implement CheckPrereq
55
    - implement Exec
56
    - implement BuildHooksEnv
57
    - redefine HPATH and HTYPE
58
    - optionally redefine their run requirements:
59
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
60

61
  Note that all commands require root permissions.
62

63
  """
64
  HPATH = None
65
  HTYPE = None
66
  _OP_REQP = []
67
  REQ_BGL = True
68

    
69
  def __init__(self, processor, op, context, rpc):
70
    """Constructor for LogicalUnit.
71

72
    This needs to be overriden in derived classes in order to check op
73
    validity.
74

75
    """
76
    self.proc = processor
77
    self.op = op
78
    self.cfg = context.cfg
79
    self.context = context
80
    self.rpc = rpc
81
    # Dicts used to declare locking needs to mcpu
82
    self.needed_locks = None
83
    self.acquired_locks = {}
84
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
85
    self.add_locks = {}
86
    self.remove_locks = {}
87
    # Used to force good behavior when calling helper functions
88
    self.recalculate_locks = {}
89
    self.__ssh = None
90
    # logging
91
    self.LogWarning = processor.LogWarning
92
    self.LogInfo = processor.LogInfo
93

    
94
    for attr_name in self._OP_REQP:
95
      attr_val = getattr(op, attr_name, None)
96
      if attr_val is None:
97
        raise errors.OpPrereqError("Required parameter '%s' missing" %
98
                                   attr_name)
99
    self.CheckArguments()
100

    
101
  def __GetSSH(self):
102
    """Returns the SshRunner object
103

104
    """
105
    if not self.__ssh:
106
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
107
    return self.__ssh
108

    
109
  ssh = property(fget=__GetSSH)
110

    
111
  def CheckArguments(self):
112
    """Check syntactic validity for the opcode arguments.
113

114
    This method is for doing a simple syntactic check and ensure
115
    validity of opcode parameters, without any cluster-related
116
    checks. While the same can be accomplished in ExpandNames and/or
117
    CheckPrereq, doing these separate is better because:
118

119
      - ExpandNames is left as as purely a lock-related function
120
      - CheckPrereq is run after we have aquired locks (and possible
121
        waited for them)
122

123
    The function is allowed to change the self.op attribute so that
124
    later methods can no longer worry about missing parameters.
125

126
    """
127
    pass
128

    
129
  def ExpandNames(self):
130
    """Expand names for this LU.
131

132
    This method is called before starting to execute the opcode, and it should
133
    update all the parameters of the opcode to their canonical form (e.g. a
134
    short node name must be fully expanded after this method has successfully
135
    completed). This way locking, hooks, logging, ecc. can work correctly.
136

137
    LUs which implement this method must also populate the self.needed_locks
138
    member, as a dict with lock levels as keys, and a list of needed lock names
139
    as values. Rules:
140

141
      - use an empty dict if you don't need any lock
142
      - if you don't need any lock at a particular level omit that level
143
      - don't put anything for the BGL level
144
      - if you want all locks at a level use locking.ALL_SET as a value
145

146
    If you need to share locks (rather than acquire them exclusively) at one
147
    level you can modify self.share_locks, setting a true value (usually 1) for
148
    that level. By default locks are not shared.
149

150
    Examples::
151

152
      # Acquire all nodes and one instance
153
      self.needed_locks = {
154
        locking.LEVEL_NODE: locking.ALL_SET,
155
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
156
      }
157
      # Acquire just two nodes
158
      self.needed_locks = {
159
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
160
      }
161
      # Acquire no locks
162
      self.needed_locks = {} # No, you can't leave it to the default value None
163

164
    """
165
    # The implementation of this method is mandatory only if the new LU is
166
    # concurrent, so that old LUs don't need to be changed all at the same
167
    # time.
168
    if self.REQ_BGL:
169
      self.needed_locks = {} # Exclusive LUs don't need locks.
170
    else:
171
      raise NotImplementedError
172

    
173
  def DeclareLocks(self, level):
174
    """Declare LU locking needs for a level
175

176
    While most LUs can just declare their locking needs at ExpandNames time,
177
    sometimes there's the need to calculate some locks after having acquired
178
    the ones before. This function is called just before acquiring locks at a
179
    particular level, but after acquiring the ones at lower levels, and permits
180
    such calculations. It can be used to modify self.needed_locks, and by
181
    default it does nothing.
182

183
    This function is only called if you have something already set in
184
    self.needed_locks for the level.
185

186
    @param level: Locking level which is going to be locked
187
    @type level: member of ganeti.locking.LEVELS
188

189
    """
190

    
191
  def CheckPrereq(self):
192
    """Check prerequisites for this LU.
193

194
    This method should check that the prerequisites for the execution
195
    of this LU are fulfilled. It can do internode communication, but
196
    it should be idempotent - no cluster or system changes are
197
    allowed.
198

199
    The method should raise errors.OpPrereqError in case something is
200
    not fulfilled. Its return value is ignored.
201

202
    This method should also update all the parameters of the opcode to
203
    their canonical form if it hasn't been done by ExpandNames before.
204

205
    """
206
    raise NotImplementedError
207

    
208
  def Exec(self, feedback_fn):
209
    """Execute the LU.
210

211
    This method should implement the actual work. It should raise
212
    errors.OpExecError for failures that are somewhat dealt with in
213
    code, or expected.
214

215
    """
216
    raise NotImplementedError
217

    
218
  def BuildHooksEnv(self):
219
    """Build hooks environment for this LU.
220

221
    This method should return a three-node tuple consisting of: a dict
222
    containing the environment that will be used for running the
223
    specific hook for this LU, a list of node names on which the hook
224
    should run before the execution, and a list of node names on which
225
    the hook should run after the execution.
226

227
    The keys of the dict must not have 'GANETI_' prefixed as this will
228
    be handled in the hooks runner. Also note additional keys will be
229
    added by the hooks runner. If the LU doesn't define any
230
    environment, an empty dict (and not None) should be returned.
231

232
    No nodes should be returned as an empty list (and not None).
233

234
    Note that if the HPATH for a LU class is None, this function will
235
    not be called.
236

237
    """
238
    raise NotImplementedError
239

    
240
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
241
    """Notify the LU about the results of its hooks.
242

243
    This method is called every time a hooks phase is executed, and notifies
244
    the Logical Unit about the hooks' result. The LU can then use it to alter
245
    its result based on the hooks.  By default the method does nothing and the
246
    previous result is passed back unchanged but any LU can define it if it
247
    wants to use the local cluster hook-scripts somehow.
248

249
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
250
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
251
    @param hook_results: the results of the multi-node hooks rpc call
252
    @param feedback_fn: function used send feedback back to the caller
253
    @param lu_result: the previous Exec result this LU had, or None
254
        in the PRE phase
255
    @return: the new Exec result, based on the previous result
256
        and hook results
257

258
    """
259
    return lu_result
260

    
261
  def _ExpandAndLockInstance(self):
262
    """Helper function to expand and lock an instance.
263

264
    Many LUs that work on an instance take its name in self.op.instance_name
265
    and need to expand it and then declare the expanded name for locking. This
266
    function does it, and then updates self.op.instance_name to the expanded
267
    name. It also initializes needed_locks as a dict, if this hasn't been done
268
    before.
269

270
    """
271
    if self.needed_locks is None:
272
      self.needed_locks = {}
273
    else:
274
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
275
        "_ExpandAndLockInstance called with instance-level locks set"
276
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
277
    if expanded_name is None:
278
      raise errors.OpPrereqError("Instance '%s' not known" %
279
                                  self.op.instance_name)
280
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
281
    self.op.instance_name = expanded_name
282

    
283
  def _LockInstancesNodes(self, primary_only=False):
284
    """Helper function to declare instances' nodes for locking.
285

286
    This function should be called after locking one or more instances to lock
287
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
288
    with all primary or secondary nodes for instances already locked and
289
    present in self.needed_locks[locking.LEVEL_INSTANCE].
290

291
    It should be called from DeclareLocks, and for safety only works if
292
    self.recalculate_locks[locking.LEVEL_NODE] is set.
293

294
    In the future it may grow parameters to just lock some instance's nodes, or
295
    to just lock primaries or secondary nodes, if needed.
296

297
    If should be called in DeclareLocks in a way similar to::
298

299
      if level == locking.LEVEL_NODE:
300
        self._LockInstancesNodes()
301

302
    @type primary_only: boolean
303
    @param primary_only: only lock primary nodes of locked instances
304

305
    """
306
    assert locking.LEVEL_NODE in self.recalculate_locks, \
307
      "_LockInstancesNodes helper function called with no nodes to recalculate"
308

    
309
    # TODO: check if we're really been called with the instance locks held
310

    
311
    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
312
    # future we might want to have different behaviors depending on the value
313
    # of self.recalculate_locks[locking.LEVEL_NODE]
314
    wanted_nodes = []
315
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
316
      instance = self.context.cfg.GetInstanceInfo(instance_name)
317
      wanted_nodes.append(instance.primary_node)
318
      if not primary_only:
319
        wanted_nodes.extend(instance.secondary_nodes)
320

    
321
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
322
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
323
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
324
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
325

    
326
    del self.recalculate_locks[locking.LEVEL_NODE]
327

    
328

    
329
class NoHooksLU(LogicalUnit):
330
  """Simple LU which runs no hooks.
331

332
  This LU is intended as a parent for other LogicalUnits which will
333
  run no hooks, in order to reduce duplicate code.
334

335
  """
336
  HPATH = None
337
  HTYPE = None
338

    
339

    
340
def _GetWantedNodes(lu, nodes):
341
  """Returns list of checked and expanded node names.
342

343
  @type lu: L{LogicalUnit}
344
  @param lu: the logical unit on whose behalf we execute
345
  @type nodes: list
346
  @param nodes: list of node names or None for all nodes
347
  @rtype: list
348
  @return: the list of nodes, sorted
349
  @raise errors.OpProgrammerError: if the nodes parameter is wrong type
350

351
  """
352
  if not isinstance(nodes, list):
353
    raise errors.OpPrereqError("Invalid argument type 'nodes'")
354

    
355
  if not nodes:
356
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
357
      " non-empty list of nodes whose name is to be expanded.")
358

    
359
  wanted = []
360
  for name in nodes:
361
    node = lu.cfg.ExpandNodeName(name)
362
    if node is None:
363
      raise errors.OpPrereqError("No such node name '%s'" % name)
364
    wanted.append(node)
365

    
366
  return utils.NiceSort(wanted)
367

    
368

    
369
def _GetWantedInstances(lu, instances):
370
  """Returns list of checked and expanded instance names.
371

372
  @type lu: L{LogicalUnit}
373
  @param lu: the logical unit on whose behalf we execute
374
  @type instances: list
375
  @param instances: list of instance names or None for all instances
376
  @rtype: list
377
  @return: the list of instances, sorted
378
  @raise errors.OpPrereqError: if the instances parameter is wrong type
379
  @raise errors.OpPrereqError: if any of the passed instances is not found
380

381
  """
382
  if not isinstance(instances, list):
383
    raise errors.OpPrereqError("Invalid argument type 'instances'")
384

    
385
  if instances:
386
    wanted = []
387

    
388
    for name in instances:
389
      instance = lu.cfg.ExpandInstanceName(name)
390
      if instance is None:
391
        raise errors.OpPrereqError("No such instance name '%s'" % name)
392
      wanted.append(instance)
393

    
394
  else:
395
    wanted = lu.cfg.GetInstanceList()
396
  return utils.NiceSort(wanted)
397

    
398

    
399
def _CheckOutputFields(static, dynamic, selected):
400
  """Checks whether all selected fields are valid.
401

402
  @type static: L{utils.FieldSet}
403
  @param static: static fields set
404
  @type dynamic: L{utils.FieldSet}
405
  @param dynamic: dynamic fields set
406

407
  """
408
  f = utils.FieldSet()
409
  f.Extend(static)
410
  f.Extend(dynamic)
411

    
412
  delta = f.NonMatching(selected)
413
  if delta:
414
    raise errors.OpPrereqError("Unknown output fields selected: %s"
415
                               % ",".join(delta))
416

    
417

    
418
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
419
                          memory, vcpus, nics):
420
  """Builds instance related env variables for hooks
421

422
  This builds the hook environment from individual variables.
423

424
  @type name: string
425
  @param name: the name of the instance
426
  @type primary_node: string
427
  @param primary_node: the name of the instance's primary node
428
  @type secondary_nodes: list
429
  @param secondary_nodes: list of secondary nodes as strings
430
  @type os_type: string
431
  @param os_type: the name of the instance's OS
432
  @type status: string
433
  @param status: the desired status of the instances
434
  @type memory: string
435
  @param memory: the memory size of the instance
436
  @type vcpus: string
437
  @param vcpus: the count of VCPUs the instance has
438
  @type nics: list
439
  @param nics: list of tuples (ip, bridge, mac) representing
440
      the NICs the instance  has
441
  @rtype: dict
442
  @return: the hook environment for this instance
443

444
  """
445
  env = {
446
    "OP_TARGET": name,
447
    "INSTANCE_NAME": name,
448
    "INSTANCE_PRIMARY": primary_node,
449
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
450
    "INSTANCE_OS_TYPE": os_type,
451
    "INSTANCE_STATUS": status,
452
    "INSTANCE_MEMORY": memory,
453
    "INSTANCE_VCPUS": vcpus,
454
  }
455

    
456
  if nics:
457
    nic_count = len(nics)
458
    for idx, (ip, bridge, mac) in enumerate(nics):
459
      if ip is None:
460
        ip = ""
461
      env["INSTANCE_NIC%d_IP" % idx] = ip
462
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
463
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
464
  else:
465
    nic_count = 0
466

    
467
  env["INSTANCE_NIC_COUNT"] = nic_count
468

    
469
  return env
470

    
471

    
472
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
473
  """Builds instance related env variables for hooks from an object.
474

475
  @type lu: L{LogicalUnit}
476
  @param lu: the logical unit on whose behalf we execute
477
  @type instance: L{objects.Instance}
478
  @param instance: the instance for which we should build the
479
      environment
480
  @type override: dict
481
  @param override: dictionary with key/values that will override
482
      our values
483
  @rtype: dict
484
  @return: the hook environment dictionary
485

486
  """
487
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
488
  args = {
489
    'name': instance.name,
490
    'primary_node': instance.primary_node,
491
    'secondary_nodes': instance.secondary_nodes,
492
    'os_type': instance.os,
493
    'status': instance.os,
494
    'memory': bep[constants.BE_MEMORY],
495
    'vcpus': bep[constants.BE_VCPUS],
496
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
497
  }
498
  if override:
499
    args.update(override)
500
  return _BuildInstanceHookEnv(**args)
501

    
502

    
503
def _CheckInstanceBridgesExist(lu, instance):
504
  """Check that the brigdes needed by an instance exist.
505

506
  """
507
  # check bridges existance
508
  brlist = [nic.bridge for nic in instance.nics]
509
  if not lu.rpc.call_bridges_exist(instance.primary_node, brlist):
510
    raise errors.OpPrereqError("one or more target bridges %s does not"
511
                               " exist on destination node '%s'" %
512
                               (brlist, instance.primary_node))
513

    
514

    
515
class LUDestroyCluster(NoHooksLU):
516
  """Logical unit for destroying the cluster.
517

518
  """
519
  _OP_REQP = []
520

    
521
  def CheckPrereq(self):
522
    """Check prerequisites.
523

524
    This checks whether the cluster is empty.
525

526
    Any errors are signalled by raising errors.OpPrereqError.
527

528
    """
529
    master = self.cfg.GetMasterNode()
530

    
531
    nodelist = self.cfg.GetNodeList()
532
    if len(nodelist) != 1 or nodelist[0] != master:
533
      raise errors.OpPrereqError("There are still %d node(s) in"
534
                                 " this cluster." % (len(nodelist) - 1))
535
    instancelist = self.cfg.GetInstanceList()
536
    if instancelist:
537
      raise errors.OpPrereqError("There are still %d instance(s) in"
538
                                 " this cluster." % len(instancelist))
539

    
540
  def Exec(self, feedback_fn):
541
    """Destroys the cluster.
542

543
    """
544
    master = self.cfg.GetMasterNode()
545
    if not self.rpc.call_node_stop_master(master, False):
546
      raise errors.OpExecError("Could not disable the master role")
547
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
548
    utils.CreateBackup(priv_key)
549
    utils.CreateBackup(pub_key)
550
    return master
551

    
552

    
553
class LUVerifyCluster(LogicalUnit):
554
  """Verifies the cluster status.
555

556
  """
557
  HPATH = "cluster-verify"
558
  HTYPE = constants.HTYPE_CLUSTER
559
  _OP_REQP = ["skip_checks"]
560
  REQ_BGL = False
561

    
562
  def ExpandNames(self):
563
    self.needed_locks = {
564
      locking.LEVEL_NODE: locking.ALL_SET,
565
      locking.LEVEL_INSTANCE: locking.ALL_SET,
566
    }
567
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
568

    
569
  def _VerifyNode(self, nodeinfo, file_list, local_cksum, vglist, node_result,
570
                  remote_version, feedback_fn, master_files):
571
    """Run multiple tests against a node.
572

573
    Test list:
574

575
      - compares ganeti version
576
      - checks vg existance and size > 20G
577
      - checks config file checksum
578
      - checks ssh to other nodes
579

580
    @type nodeinfo: L{objects.Node}
581
    @param nodeinfo: the node to check
582
    @param file_list: required list of files
583
    @param local_cksum: dictionary of local files and their checksums
584
    @type vglist: dict
585
    @param vglist: dictionary of volume group names and their size
586
    @param node_result: the results from the node
587
    @param remote_version: the RPC version from the remote node
588
    @param feedback_fn: function used to accumulate results
589
    @param master_files: list of files that only masters should have
590

591
    """
592
    node = nodeinfo.name
593
    # compares ganeti version
594
    local_version = constants.PROTOCOL_VERSION
595
    if not remote_version:
596
      feedback_fn("  - ERROR: connection to %s failed" % (node))
597
      return True
598

    
599
    if local_version != remote_version:
600
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
601
                      (local_version, node, remote_version))
602
      return True
603

    
604
    # checks vg existance and size > 20G
605

    
606
    bad = False
607
    if not vglist:
608
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
609
                      (node,))
610
      bad = True
611
    else:
612
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
613
                                            constants.MIN_VG_SIZE)
614
      if vgstatus:
615
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
616
        bad = True
617

    
618
    if not node_result:
619
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
620
      return True
621

    
622
    # checks config file checksum
623
    # checks ssh to any
624

    
625
    if 'filelist' not in node_result:
626
      bad = True
627
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
628
    else:
629
      remote_cksum = node_result['filelist']
630
      for file_name in file_list:
631
        node_is_mc = nodeinfo.master_candidate
632
        must_have_file = file_name not in master_files
633
        if file_name not in remote_cksum:
634
          if node_is_mc or must_have_file:
635
            bad = True
636
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
637
        elif remote_cksum[file_name] != local_cksum[file_name]:
638
          if node_is_mc or must_have_file:
639
            bad = True
640
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
641
          else:
642
            # not candidate and this is not a must-have file
643
            bad = True
644
            feedback_fn("  - ERROR: non master-candidate has old/wrong file"
645
                        " '%s'" % file_name)
646
        else:
647
          # all good, except non-master/non-must have combination
648
          if not node_is_mc and not must_have_file:
649
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
650
                        " candidates" % file_name)
651

    
652
    if 'nodelist' not in node_result:
653
      bad = True
654
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
655
    else:
656
      if node_result['nodelist']:
657
        bad = True
658
        for node in node_result['nodelist']:
659
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
660
                          (node, node_result['nodelist'][node]))
661
    if 'node-net-test' not in node_result:
662
      bad = True
663
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
664
    else:
665
      if node_result['node-net-test']:
666
        bad = True
667
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
668
        for node in nlist:
669
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
670
                          (node, node_result['node-net-test'][node]))
671

    
672
    hyp_result = node_result.get('hypervisor', None)
673
    if isinstance(hyp_result, dict):
674
      for hv_name, hv_result in hyp_result.iteritems():
675
        if hv_result is not None:
676
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
677
                      (hv_name, hv_result))
678
    return bad
679

    
680
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
681
                      node_instance, feedback_fn):
682
    """Verify an instance.
683

684
    This function checks to see if the required block devices are
685
    available on the instance's node.
686

687
    """
688
    bad = False
689

    
690
    node_current = instanceconfig.primary_node
691

    
692
    node_vol_should = {}
693
    instanceconfig.MapLVsByNode(node_vol_should)
694

    
695
    for node in node_vol_should:
696
      for volume in node_vol_should[node]:
697
        if node not in node_vol_is or volume not in node_vol_is[node]:
698
          feedback_fn("  - ERROR: volume %s missing on node %s" %
699
                          (volume, node))
700
          bad = True
701

    
702
    if not instanceconfig.status == 'down':
703
      if (node_current not in node_instance or
704
          not instance in node_instance[node_current]):
705
        feedback_fn("  - ERROR: instance %s not running on node %s" %
706
                        (instance, node_current))
707
        bad = True
708

    
709
    for node in node_instance:
710
      if (not node == node_current):
711
        if instance in node_instance[node]:
712
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
713
                          (instance, node))
714
          bad = True
715

    
716
    return bad
717

    
718
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
719
    """Verify if there are any unknown volumes in the cluster.
720

721
    The .os, .swap and backup volumes are ignored. All other volumes are
722
    reported as unknown.
723

724
    """
725
    bad = False
726

    
727
    for node in node_vol_is:
728
      for volume in node_vol_is[node]:
729
        if node not in node_vol_should or volume not in node_vol_should[node]:
730
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
731
                      (volume, node))
732
          bad = True
733
    return bad
734

    
735
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
736
    """Verify the list of running instances.
737

738
    This checks what instances are running but unknown to the cluster.
739

740
    """
741
    bad = False
742
    for node in node_instance:
743
      for runninginstance in node_instance[node]:
744
        if runninginstance not in instancelist:
745
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
746
                          (runninginstance, node))
747
          bad = True
748
    return bad
749

    
750
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
751
    """Verify N+1 Memory Resilience.
752

753
    Check that if one single node dies we can still start all the instances it
754
    was primary for.
755

756
    """
757
    bad = False
758

    
759
    for node, nodeinfo in node_info.iteritems():
760
      # This code checks that every node which is now listed as secondary has
761
      # enough memory to host all instances it is supposed to should a single
762
      # other node in the cluster fail.
763
      # FIXME: not ready for failover to an arbitrary node
764
      # FIXME: does not support file-backed instances
765
      # WARNING: we currently take into account down instances as well as up
766
      # ones, considering that even if they're down someone might want to start
767
      # them even in the event of a node failure.
768
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
769
        needed_mem = 0
770
        for instance in instances:
771
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
772
          if bep[constants.BE_AUTO_BALANCE]:
773
            needed_mem += bep[constants.BE_MEMORY]
774
        if nodeinfo['mfree'] < needed_mem:
775
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
776
                      " failovers should node %s fail" % (node, prinode))
777
          bad = True
778
    return bad
779

    
780
  def CheckPrereq(self):
781
    """Check prerequisites.
782

783
    Transform the list of checks we're going to skip into a set and check that
784
    all its members are valid.
785

786
    """
787
    self.skip_set = frozenset(self.op.skip_checks)
788
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
789
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
790

    
791
  def BuildHooksEnv(self):
792
    """Build hooks env.
793

794
    Cluster-Verify hooks just rone in the post phase and their failure makes
795
    the output be logged in the verify output and the verification to fail.
796

797
    """
798
    all_nodes = self.cfg.GetNodeList()
799
    # TODO: populate the environment with useful information for verify hooks
800
    env = {}
801
    return env, [], all_nodes
802

    
803
  def Exec(self, feedback_fn):
804
    """Verify integrity of cluster, performing various test on nodes.
805

806
    """
807
    bad = False
808
    feedback_fn("* Verifying global settings")
809
    for msg in self.cfg.VerifyConfig():
810
      feedback_fn("  - ERROR: %s" % msg)
811

    
812
    vg_name = self.cfg.GetVGName()
813
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
814
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
815
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
816
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
817
    i_non_redundant = [] # Non redundant instances
818
    i_non_a_balanced = [] # Non auto-balanced instances
819
    node_volume = {}
820
    node_instance = {}
821
    node_info = {}
822
    instance_cfg = {}
823

    
824
    # FIXME: verify OS list
825
    # do local checksums
826
    master_files = [constants.CLUSTER_CONF_FILE]
827

    
828
    file_names = ssconf.SimpleStore().GetFileList()
829
    file_names.append(constants.SSL_CERT_FILE)
830
    file_names.extend(master_files)
831

    
832
    local_checksums = utils.FingerprintFiles(file_names)
833

    
834
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
835
    all_volumeinfo = self.rpc.call_volume_list(nodelist, vg_name)
836
    all_instanceinfo = self.rpc.call_instance_list(nodelist, hypervisors)
837
    all_vglist = self.rpc.call_vg_list(nodelist)
838
    node_verify_param = {
839
      'filelist': file_names,
840
      'nodelist': nodelist,
841
      'hypervisor': hypervisors,
842
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
843
                        for node in nodeinfo]
844
      }
845
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
846
                                           self.cfg.GetClusterName())
847
    all_rversion = self.rpc.call_version(nodelist)
848
    all_ninfo = self.rpc.call_node_info(nodelist, self.cfg.GetVGName(),
849
                                        self.cfg.GetHypervisorType())
850

    
851
    cluster = self.cfg.GetClusterInfo()
852
    master_node = self.cfg.GetMasterNode()
853
    for node_i in nodeinfo:
854
      node = node_i.name
855
      if node == master_node:
856
        ntype="master"
857
      elif node_i.master_candidate:
858
        ntype="master candidate"
859
      else:
860
        ntype="regular"
861
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))
862
      result = self._VerifyNode(node_i, file_names, local_checksums,
863
                                all_vglist[node], all_nvinfo[node],
864
                                all_rversion[node], feedback_fn, master_files)
865
      bad = bad or result
866

    
867
      # node_volume
868
      volumeinfo = all_volumeinfo[node]
869

    
870
      if isinstance(volumeinfo, basestring):
871
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
872
                    (node, volumeinfo[-400:].encode('string_escape')))
873
        bad = True
874
        node_volume[node] = {}
875
      elif not isinstance(volumeinfo, dict):
876
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
877
        bad = True
878
        continue
879
      else:
880
        node_volume[node] = volumeinfo
881

    
882
      # node_instance
883
      nodeinstance = all_instanceinfo[node]
884
      if type(nodeinstance) != list:
885
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
886
        bad = True
887
        continue
888

    
889
      node_instance[node] = nodeinstance
890

    
891
      # node_info
892
      nodeinfo = all_ninfo[node]
893
      if not isinstance(nodeinfo, dict):
894
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
895
        bad = True
896
        continue
897

    
898
      try:
899
        node_info[node] = {
900
          "mfree": int(nodeinfo['memory_free']),
901
          "dfree": int(nodeinfo['vg_free']),
902
          "pinst": [],
903
          "sinst": [],
904
          # dictionary holding all instances this node is secondary for,
905
          # grouped by their primary node. Each key is a cluster node, and each
906
          # value is a list of instances which have the key as primary and the
907
          # current node as secondary.  this is handy to calculate N+1 memory
908
          # availability if you can only failover from a primary to its
909
          # secondary.
910
          "sinst-by-pnode": {},
911
        }
912
      except ValueError:
913
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
914
        bad = True
915
        continue
916

    
917
    node_vol_should = {}
918

    
919
    for instance in instancelist:
920
      feedback_fn("* Verifying instance %s" % instance)
921
      inst_config = self.cfg.GetInstanceInfo(instance)
922
      result =  self._VerifyInstance(instance, inst_config, node_volume,
923
                                     node_instance, feedback_fn)
924
      bad = bad or result
925

    
926
      inst_config.MapLVsByNode(node_vol_should)
927

    
928
      instance_cfg[instance] = inst_config
929

    
930
      pnode = inst_config.primary_node
931
      if pnode in node_info:
932
        node_info[pnode]['pinst'].append(instance)
933
      else:
934
        feedback_fn("  - ERROR: instance %s, connection to primary node"
935
                    " %s failed" % (instance, pnode))
936
        bad = True
937

    
938
      # If the instance is non-redundant we cannot survive losing its primary
939
      # node, so we are not N+1 compliant. On the other hand we have no disk
940
      # templates with more than one secondary so that situation is not well
941
      # supported either.
942
      # FIXME: does not support file-backed instances
943
      if len(inst_config.secondary_nodes) == 0:
944
        i_non_redundant.append(instance)
945
      elif len(inst_config.secondary_nodes) > 1:
946
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
947
                    % instance)
948

    
949
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
950
        i_non_a_balanced.append(instance)
951

    
952
      for snode in inst_config.secondary_nodes:
953
        if snode in node_info:
954
          node_info[snode]['sinst'].append(instance)
955
          if pnode not in node_info[snode]['sinst-by-pnode']:
956
            node_info[snode]['sinst-by-pnode'][pnode] = []
957
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
958
        else:
959
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
960
                      " %s failed" % (instance, snode))
961

    
962
    feedback_fn("* Verifying orphan volumes")
963
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
964
                                       feedback_fn)
965
    bad = bad or result
966

    
967
    feedback_fn("* Verifying remaining instances")
968
    result = self._VerifyOrphanInstances(instancelist, node_instance,
969
                                         feedback_fn)
970
    bad = bad or result
971

    
972
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
973
      feedback_fn("* Verifying N+1 Memory redundancy")
974
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
975
      bad = bad or result
976

    
977
    feedback_fn("* Other Notes")
978
    if i_non_redundant:
979
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
980
                  % len(i_non_redundant))
981

    
982
    if i_non_a_balanced:
983
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
984
                  % len(i_non_a_balanced))
985

    
986
    return not bad
987

    
988
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
989
    """Analize the post-hooks' result
990

991
    This method analyses the hook result, handles it, and sends some
992
    nicely-formatted feedback back to the user.
993

994
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
995
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
996
    @param hooks_results: the results of the multi-node hooks rpc call
997
    @param feedback_fn: function used send feedback back to the caller
998
    @param lu_result: previous Exec result
999
    @return: the new Exec result, based on the previous result
1000
        and hook results
1001

1002
    """
1003
    # We only really run POST phase hooks, and are only interested in
1004
    # their results
1005
    if phase == constants.HOOKS_PHASE_POST:
1006
      # Used to change hooks' output to proper indentation
1007
      indent_re = re.compile('^', re.M)
1008
      feedback_fn("* Hooks Results")
1009
      if not hooks_results:
1010
        feedback_fn("  - ERROR: general communication failure")
1011
        lu_result = 1
1012
      else:
1013
        for node_name in hooks_results:
1014
          show_node_header = True
1015
          res = hooks_results[node_name]
1016
          if res is False or not isinstance(res, list):
1017
            feedback_fn("    Communication failure")
1018
            lu_result = 1
1019
            continue
1020
          for script, hkr, output in res:
1021
            if hkr == constants.HKR_FAIL:
1022
              # The node header is only shown once, if there are
1023
              # failing hooks on that node
1024
              if show_node_header:
1025
                feedback_fn("  Node %s:" % node_name)
1026
                show_node_header = False
1027
              feedback_fn("    ERROR: Script %s failed, output:" % script)
1028
              output = indent_re.sub('      ', output)
1029
              feedback_fn("%s" % output)
1030
              lu_result = 1
1031

    
1032
      return lu_result
1033

    
1034

    
1035
class LUVerifyDisks(NoHooksLU):
1036
  """Verifies the cluster disks status.
1037

1038
  """
1039
  _OP_REQP = []
1040
  REQ_BGL = False
1041

    
1042
  def ExpandNames(self):
1043
    self.needed_locks = {
1044
      locking.LEVEL_NODE: locking.ALL_SET,
1045
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1046
    }
1047
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
1048

    
1049
  def CheckPrereq(self):
1050
    """Check prerequisites.
1051

1052
    This has no prerequisites.
1053

1054
    """
1055
    pass
1056

    
1057
  def Exec(self, feedback_fn):
1058
    """Verify integrity of cluster disks.
1059

1060
    """
1061
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
1062

    
1063
    vg_name = self.cfg.GetVGName()
1064
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1065
    instances = [self.cfg.GetInstanceInfo(name)
1066
                 for name in self.cfg.GetInstanceList()]
1067

    
1068
    nv_dict = {}
1069
    for inst in instances:
1070
      inst_lvs = {}
1071
      if (inst.status != "up" or
1072
          inst.disk_template not in constants.DTS_NET_MIRROR):
1073
        continue
1074
      inst.MapLVsByNode(inst_lvs)
1075
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1076
      for node, vol_list in inst_lvs.iteritems():
1077
        for vol in vol_list:
1078
          nv_dict[(node, vol)] = inst
1079

    
1080
    if not nv_dict:
1081
      return result
1082

    
1083
    node_lvs = self.rpc.call_volume_list(nodes, vg_name)
1084

    
1085
    to_act = set()
1086
    for node in nodes:
1087
      # node_volume
1088
      lvs = node_lvs[node]
1089

    
1090
      if isinstance(lvs, basestring):
1091
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
1092
        res_nlvm[node] = lvs
1093
      elif not isinstance(lvs, dict):
1094
        logging.warning("Connection to node %s failed or invalid data"
1095
                        " returned", node)
1096
        res_nodes.append(node)
1097
        continue
1098

    
1099
      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
1100
        inst = nv_dict.pop((node, lv_name), None)
1101
        if (not lv_online and inst is not None
1102
            and inst.name not in res_instances):
1103
          res_instances.append(inst.name)
1104

    
1105
    # any leftover items in nv_dict are missing LVs, let's arrange the
1106
    # data better
1107
    for key, inst in nv_dict.iteritems():
1108
      if inst.name not in res_missing:
1109
        res_missing[inst.name] = []
1110
      res_missing[inst.name].append(key)
1111

    
1112
    return result
1113

    
1114

    
1115
class LURenameCluster(LogicalUnit):
1116
  """Rename the cluster.
1117

1118
  """
1119
  HPATH = "cluster-rename"
1120
  HTYPE = constants.HTYPE_CLUSTER
1121
  _OP_REQP = ["name"]
1122

    
1123
  def BuildHooksEnv(self):
1124
    """Build hooks env.
1125

1126
    """
1127
    env = {
1128
      "OP_TARGET": self.cfg.GetClusterName(),
1129
      "NEW_NAME": self.op.name,
1130
      }
1131
    mn = self.cfg.GetMasterNode()
1132
    return env, [mn], [mn]
1133

    
1134
  def CheckPrereq(self):
1135
    """Verify that the passed name is a valid one.
1136

1137
    """
1138
    hostname = utils.HostInfo(self.op.name)
1139

    
1140
    new_name = hostname.name
1141
    self.ip = new_ip = hostname.ip
1142
    old_name = self.cfg.GetClusterName()
1143
    old_ip = self.cfg.GetMasterIP()
1144
    if new_name == old_name and new_ip == old_ip:
1145
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1146
                                 " cluster has changed")
1147
    if new_ip != old_ip:
1148
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1149
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1150
                                   " reachable on the network. Aborting." %
1151
                                   new_ip)
1152

    
1153
    self.op.name = new_name
1154

    
1155
  def Exec(self, feedback_fn):
1156
    """Rename the cluster.
1157

1158
    """
1159
    clustername = self.op.name
1160
    ip = self.ip
1161

    
1162
    # shutdown the master IP
1163
    master = self.cfg.GetMasterNode()
1164
    if not self.rpc.call_node_stop_master(master, False):
1165
      raise errors.OpExecError("Could not disable the master role")
1166

    
1167
    try:
1168
      # modify the sstore
1169
      # TODO: sstore
1170
      ss.SetKey(ss.SS_MASTER_IP, ip)
1171
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)
1172

    
1173
      # Distribute updated ss config to all nodes
1174
      myself = self.cfg.GetNodeInfo(master)
1175
      dist_nodes = self.cfg.GetNodeList()
1176
      if myself.name in dist_nodes:
1177
        dist_nodes.remove(myself.name)
1178

    
1179
      logging.debug("Copying updated ssconf data to all nodes")
1180
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
1181
        fname = ss.KeyToFilename(keyname)
1182
        result = self.rpc.call_upload_file(dist_nodes, fname)
1183
        for to_node in dist_nodes:
1184
          if not result[to_node]:
1185
            self.LogWarning("Copy of file %s to node %s failed",
1186
                            fname, to_node)
1187
    finally:
1188
      if not self.rpc.call_node_start_master(master, False):
1189
        self.LogWarning("Could not re-enable the master role on"
1190
                        " the master, please restart manually.")
1191

    
1192

    
1193
def _RecursiveCheckIfLVMBased(disk):
1194
  """Check if the given disk or its children are lvm-based.
1195

1196
  @type disk: L{objects.Disk}
1197
  @param disk: the disk to check
1198
  @rtype: booleean
1199
  @return: boolean indicating whether a LD_LV dev_type was found or not
1200

1201
  """
1202
  if disk.children:
1203
    for chdisk in disk.children:
1204
      if _RecursiveCheckIfLVMBased(chdisk):
1205
        return True
1206
  return disk.dev_type == constants.LD_LV
1207

    
1208

    
1209
class LUSetClusterParams(LogicalUnit):
1210
  """Change the parameters of the cluster.
1211

1212
  """
1213
  HPATH = "cluster-modify"
1214
  HTYPE = constants.HTYPE_CLUSTER
1215
  _OP_REQP = []
1216
  REQ_BGL = False
1217

    
1218
  def CheckParameters(self):
1219
    """Check parameters
1220

1221
    """
1222
    if not hasattr(self.op, "candidate_pool_size"):
1223
      self.op.candidate_pool_size = None
1224
    if self.op.candidate_pool_size is not None:
1225
      try:
1226
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
1227
      except ValueError, err:
1228
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
1229
                                   str(err))
1230
      if self.op.candidate_pool_size < 1:
1231
        raise errors.OpPrereqError("At least one master candidate needed")
1232

    
1233
  def ExpandNames(self):
1234
    # FIXME: in the future maybe other cluster params won't require checking on
1235
    # all nodes to be modified.
1236
    self.needed_locks = {
1237
      locking.LEVEL_NODE: locking.ALL_SET,
1238
    }
1239
    self.share_locks[locking.LEVEL_NODE] = 1
1240

    
1241
  def BuildHooksEnv(self):
1242
    """Build hooks env.
1243

1244
    """
1245
    env = {
1246
      "OP_TARGET": self.cfg.GetClusterName(),
1247
      "NEW_VG_NAME": self.op.vg_name,
1248
      }
1249
    mn = self.cfg.GetMasterNode()
1250
    return env, [mn], [mn]
1251

    
1252
  def CheckPrereq(self):
1253
    """Check prerequisites.
1254

1255
    This checks whether the given params don't conflict and
1256
    if the given volume group is valid.
1257

1258
    """
1259
    # FIXME: This only works because there is only one parameter that can be
1260
    # changed or removed.
1261
    if self.op.vg_name is not None and not self.op.vg_name:
1262
      instances = self.cfg.GetAllInstancesInfo().values()
1263
      for inst in instances:
1264
        for disk in inst.disks:
1265
          if _RecursiveCheckIfLVMBased(disk):
1266
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1267
                                       " lvm-based instances exist")
1268

    
1269
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1270

    
1271
    # if vg_name not None, checks given volume group on all nodes
1272
    if self.op.vg_name:
1273
      vglist = self.rpc.call_vg_list(node_list)
1274
      for node in node_list:
1275
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
1276
                                              constants.MIN_VG_SIZE)
1277
        if vgstatus:
1278
          raise errors.OpPrereqError("Error on node '%s': %s" %
1279
                                     (node, vgstatus))
1280

    
1281
    self.cluster = cluster = self.cfg.GetClusterInfo()
1282
    # beparams changes do not need validation (we can't validate?),
1283
    # but we still process here
1284
    if self.op.beparams:
1285
      self.new_beparams = cluster.FillDict(
1286
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)
1287

    
1288
    # hypervisor list/parameters
1289
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
1290
    if self.op.hvparams:
1291
      if not isinstance(self.op.hvparams, dict):
1292
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
1293
      for hv_name, hv_dict in self.op.hvparams.items():
1294
        if hv_name not in self.new_hvparams:
1295
          self.new_hvparams[hv_name] = hv_dict
1296
        else:
1297
          self.new_hvparams[hv_name].update(hv_dict)
1298

    
1299
    if self.op.enabled_hypervisors is not None:
1300
      self.hv_list = self.op.enabled_hypervisors
1301
    else:
1302
      self.hv_list = cluster.enabled_hypervisors
1303

    
1304
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
1305
      # either the enabled list has changed, or the parameters have, validate
1306
      for hv_name, hv_params in self.new_hvparams.items():
1307
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
1308
            (self.op.enabled_hypervisors and
1309
             hv_name in self.op.enabled_hypervisors)):
1310
          # either this is a new hypervisor, or its parameters have changed
1311
          hv_class = hypervisor.GetHypervisor(hv_name)
1312
          hv_class.CheckParameterSyntax(hv_params)
1313
          _CheckHVParams(self, node_list, hv_name, hv_params)
1314

    
1315
  def Exec(self, feedback_fn):
1316
    """Change the parameters of the cluster.
1317

1318
    """
1319
    if self.op.vg_name is not None:
1320
      if self.op.vg_name != self.cfg.GetVGName():
1321
        self.cfg.SetVGName(self.op.vg_name)
1322
      else:
1323
        feedback_fn("Cluster LVM configuration already in desired"
1324
                    " state, not changing")
1325
    if self.op.hvparams:
1326
      self.cluster.hvparams = self.new_hvparams
1327
    if self.op.enabled_hypervisors is not None:
1328
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1329
    if self.op.beparams:
1330
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
1331
    if self.op.candidate_pool_size is not None:
1332
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
1333

    
1334
    self.cfg.Update(self.cluster)
1335

    
1336
    # we want to update nodes after the cluster so that if any errors
1337
    # happen, we have recorded and saved the cluster info
1338
    if self.op.candidate_pool_size is not None:
1339
      node_info = self.cfg.GetAllNodesInfo().values()
1340
      num_candidates = len([node for node in node_info
1341
                            if node.master_candidate])
1342
      num_nodes = len(node_info)
1343
      if num_candidates < self.op.candidate_pool_size:
1344
        random.shuffle(node_info)
1345
        for node in node_info:
1346
          if num_candidates >= self.op.candidate_pool_size:
1347
            break
1348
          if node.master_candidate:
1349
            continue
1350
          node.master_candidate = True
1351
          self.LogInfo("Promoting node %s to master candidate", node.name)
1352
          self.cfg.Update(node)
1353
          self.context.ReaddNode(node)
1354
          num_candidates += 1
1355
      elif num_candidates > self.op.candidate_pool_size:
1356
        self.LogInfo("Note: more nodes are candidates (%d) than the new value"
1357
                     " of candidate_pool_size (%d)" %
1358
                     (num_candidates, self.op.candidate_pool_size))
1359

    
1360

    
1361
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
1362
  """Sleep and poll for an instance's disk to sync.
1363

1364
  """
1365
  if not instance.disks:
1366
    return True
1367

    
1368
  if not oneshot:
1369
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1370

    
1371
  node = instance.primary_node
1372

    
1373
  for dev in instance.disks:
1374
    lu.cfg.SetDiskID(dev, node)
1375

    
1376
  retries = 0
1377
  while True:
1378
    max_time = 0
1379
    done = True
1380
    cumul_degraded = False
1381
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1382
    if not rstats:
1383
      lu.LogWarning("Can't get any data from node %s", node)
1384
      retries += 1
1385
      if retries >= 10:
1386
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1387
                                 " aborting." % node)
1388
      time.sleep(6)
1389
      continue
1390
    retries = 0
1391
    for i in range(len(rstats)):
1392
      mstat = rstats[i]
1393
      if mstat is None:
1394
        lu.LogWarning("Can't compute data for node %s/%s",
1395
                           node, instance.disks[i].iv_name)
1396
        continue
1397
      # we ignore the ldisk parameter
1398
      perc_done, est_time, is_degraded, _ = mstat
1399
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1400
      if perc_done is not None:
1401
        done = False
1402
        if est_time is not None:
1403
          rem_time = "%d estimated seconds remaining" % est_time
1404
          max_time = est_time
1405
        else:
1406
          rem_time = "no time estimate"
1407
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
1408
                        (instance.disks[i].iv_name, perc_done, rem_time))
1409
    if done or oneshot:
1410
      break
1411

    
1412
    time.sleep(min(60, max_time))
1413

    
1414
  if done:
1415
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1416
  return not cumul_degraded
1417

    
1418

    
1419
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1420
  """Check that mirrors are not degraded.
1421

1422
  The ldisk parameter, if True, will change the test from the
1423
  is_degraded attribute (which represents overall non-ok status for
1424
  the device(s)) to the ldisk (representing the local storage status).
1425

1426
  """
1427
  lu.cfg.SetDiskID(dev, node)
  if ldisk:
    idx = 6
  else:
    idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    if not rstats:
      logging.warning("Node %s: disk degraded, not found or node down", node)
      result = False
    else:
      result = result and (not rstats[idx])
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and list of OS objects as values, eg::

          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}
          }

    """
    all_os = {}
    for node_name, nr in rlist.iteritems():
      if not nr:
        continue
      for os_obj in nr:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in node_list:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    node_data = self.rpc.call_os_diagnose(node_list)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(node_list, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output


class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)


class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal",
    )

  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
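    # NonMatching is true as soon as at least one requested field is not
    # in the static set, i.e. live data from the nodes will be needed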
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted


  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in _GetWantedNodes, if
    # non-empty; if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_locking:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if nodeinfo:
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})
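      # note that dict.fromkeys makes all entries share the same (empty)
      # dict object; that is fine here since it is never modified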

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field == "master_candidate":
          val = node.master_candidate
        elif field == "master":
          val = node.name == master_node
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output


class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      if node not in volumes or not volumes[node]:
        continue

      node_vols = volumes[node][:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output
1830

    
1831

    
1832
class LUAddNode(LogicalUnit):
1833
  """Logical unit for adding node to the cluster.
1834

1835
  """
1836
  HPATH = "node-add"
1837
  HTYPE = constants.HTYPE_NODE
1838
  _OP_REQP = ["node_name"]
1839

    
1840
  def BuildHooksEnv(self):
1841
    """Build hooks env.
1842

1843
    This will run on all nodes before, and on all nodes + the new node after.
1844

1845
    """
1846
    env = {
1847
      "OP_TARGET": self.op.node_name,
1848
      "NODE_NAME": self.op.node_name,
1849
      "NODE_PIP": self.op.primary_ip,
1850
      "NODE_SIP": self.op.secondary_ip,
1851
      }
1852
    nodes_0 = self.cfg.GetNodeList()
1853
    nodes_1 = nodes_0 + [self.op.node_name, ]
1854
    return env, nodes_0, nodes_1
1855

    
1856
  def CheckPrereq(self):
1857
    """Check prerequisites.
1858

1859
    This checks:
1860
     - the new node is not already in the config
1861
     - it is resolvable
1862
     - its parameters (single/dual homed) matches the cluster
1863

1864
    Any errors are signalled by raising errors.OpPrereqError.
1865

1866
    """
1867
    node_name = self.op.node_name
1868
    cfg = self.cfg
1869

    
1870
    dns_data = utils.HostInfo(node_name)
1871

    
1872
    node = dns_data.name
1873
    primary_ip = self.op.primary_ip = dns_data.ip
1874
    secondary_ip = getattr(self.op, "secondary_ip", None)
1875
    if secondary_ip is None:
1876
      secondary_ip = primary_ip
1877
    if not utils.IsValidIP(secondary_ip):
1878
      raise errors.OpPrereqError("Invalid secondary IP given")
1879
    self.op.secondary_ip = secondary_ip
1880

    
1881
    node_list = cfg.GetNodeList()
1882
    if not self.op.readd and node in node_list:
1883
      raise errors.OpPrereqError("Node %s is already in the configuration" %
1884
                                 node)
1885
    elif self.op.readd and node not in node_list:
1886
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
1887

    
1888
    for existing_node_name in node_list:
1889
      existing_node = cfg.GetNodeInfo(existing_node_name)
1890

    
1891
      if self.op.readd and node == existing_node_name:
1892
        if (existing_node.primary_ip != primary_ip or
1893
            existing_node.secondary_ip != secondary_ip):
1894
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
1895
                                     " address configuration as before")
1896
        continue
1897

    
1898
      if (existing_node.primary_ip == primary_ip or
1899
          existing_node.secondary_ip == primary_ip or
1900
          existing_node.primary_ip == secondary_ip or
1901
          existing_node.secondary_ip == secondary_ip):
1902
        raise errors.OpPrereqError("New node ip address(es) conflict with"
1903
                                   " existing node %s" % existing_node.name)
1904

    
1905
    # check that the type of the node (single versus dual homed) is the
1906
    # same as for the master
1907
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
1908
    master_singlehomed = myself.secondary_ip == myself.primary_ip
1909
    newbie_singlehomed = secondary_ip == primary_ip
1910
    if master_singlehomed != newbie_singlehomed:
1911
      if master_singlehomed:
1912
        raise errors.OpPrereqError("The master has no private ip but the"
1913
                                   " new node has one")
1914
      else:
1915
        raise errors.OpPrereqError("The master has a private ip but the"
1916
                                   " new node doesn't have one")
1917

    
1918
    # checks reachablity
1919
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
1920
      raise errors.OpPrereqError("Node not reachable by ping")
1921

    
1922
    if not newbie_singlehomed:
1923
      # check reachability from my secondary ip to newbie's secondary ip
1924
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
1925
                           source=myself.secondary_ip):
1926
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
1927
                                   " based ping to noded port")
1928

    
1929
    self.new_node = objects.Node(name=node,
1930
                                 primary_ip=primary_ip,
1931
                                 secondary_ip=secondary_ip)
1932

    
1933
  def Exec(self, feedback_fn):
1934
    """Adds the new node to the cluster.
1935

1936
    """
1937
    new_node = self.new_node
1938
    node = new_node.name
1939

    
1940
    # check connectivity
1941
    result = self.rpc.call_version([node])[node]
1942
    if result:
1943
      if constants.PROTOCOL_VERSION == result:
1944
        logging.info("Communication to node %s fine, sw version %s match",
1945
                     node, result)
1946
      else:
1947
        raise errors.OpExecError("Version mismatch master version %s,"
1948
                                 " node version %s" %
1949
                                 (constants.PROTOCOL_VERSION, result))
1950
    else:
1951
      raise errors.OpExecError("Cannot get version from the new node")
1952

    
1953
    # setup ssh on node
1954
    logging.info("Copy ssh key to node %s", node)
1955
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
1956
    keyarray = []
1957
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
1958
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
1959
                priv_key, pub_key]
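    # the order of the entries in keyfiles (and thus keyarray) must match
    # the positional arguments of call_node_add below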

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      if not self.rpc.call_node_has_ip_address(new_node.name,
                                               new_node.secondary_ip):
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if not result[verifier]:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier]['nodelist']:
        for failed in result[verifier]['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier]['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logging.debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          logging.error("Copy of file %s to node %s failed", fname, to_node)

    to_copy = []
    if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
      to_copy.append(constants.VNC_PASSWORD_FILE)
    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      if not result[node]:
        logging.error("Could not copy file %s to node %s", fname, node)

    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)


class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    if not hasattr(self.op, 'master_candidate'):
      raise errors.OpPrereqError("Please pass at least one modification")
    self.op.master_candidate = bool(self.op.master_candidate)

  def ExpandNames(self):
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that if the node is being demoted from master candidate,
    enough other master candidates remain in the pool.

    """
    force = self.force = self.op.force

    if self.op.master_candidate == False:
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
      node_info = self.cfg.GetAllNodesInfo().values()
      num_candidates = len([node for node in node_info
                            if node.master_candidate])
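      # refuse (or only warn, when forced) if demoting this node would
      # shrink the candidate pool below candidate_pool_size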
      if num_candidates <= cp_size:
        msg = ("Not enough master candidates (desired"
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
        if force:
          self.LogWarning(msg)
        else:
          raise errors.OpPrereqError(msg)

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.cfg.GetNodeInfo(self.op.node_name)

    result = []

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      result.append(("master_candidate", str(self.op.master_candidate)))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node)
    # this will trigger job queue propagation or cleanup
    self.context.ReaddNode(node)

    return result


class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.default_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": cluster.hvparams,
      "beparams": cluster.beparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      }

    return result


class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values


class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two-pass mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      if not result:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1)",
                           inst_disk.iv_name, node)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      if not result:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2)",
                           inst_disk.iv_name, node)
        disks_ok = False
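    # the assemble result on the primary node is the node-visible device
    # path (see the docstring above), which is what we record in device_info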
    device_info.append((instance.primary_node, inst_disk.iv_name, result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info


def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
                                           ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    """
    instance = self.instance
    _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
                                      [instance.hypervisor])
  ins_l = ins_l[instance.primary_node]
  if not type(ins_l) is list:
    raise errors.OpExecError("Can't contact node '%s'" %
                             instance.primary_node)

  if instance.name in ins_l:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)


def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are
  ignored.

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      if not lu.rpc.call_blockdev_shutdown(node, top_disk):
        logging.error("Could not shutdown block device %s on node %s",
                      disk.iv_name, node)
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result


def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor: C{str}
  @param hypervisor: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor)
  if not nodeinfo or not isinstance(nodeinfo, dict):
    raise errors.OpPrereqError("Could not contact node %s for resource"
                             " information" % (node,))

  free_mem = nodeinfo[node].get('memory_free')
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                             " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                             " needed %s MiB, available %s MiB" %
                             (node, reason, requested, free_mem))


class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridge existence
    _CheckInstanceBridgesExist(self, instance)

    _CheckNodeFreeMemory(self, instance.primary_node,
                         "starting instance %s" % instance.name,
                         bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")

    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    if not self.rpc.call_instance_start(node_current, instance, extra_args):
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance")


class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node
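
    # soft/hard reboots are delegated to the hypervisor via
    # call_instance_reboot; a full reboot is emulated below as
    # shutdown + disk restart + start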

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      if not self.rpc.call_instance_reboot(node_current, instance,
                                           reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      if not self.rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      if not self.rpc.call_instance_start(node_current, instance, extra_args):
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)


class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    self.cfg.MarkInstanceDown(instance.name)
    if not self.rpc.call_instance_shutdown(node_current, instance):
      self.proc.LogWarning("Could not shutdown instance")

    _ShutdownInstanceDisks(self, instance)


class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node"  % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not self.rpc.call_instance_os_add(inst.primary_node, inst):
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)

      if not result:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      if not self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                               old_name):
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    if not self.rpc.call_instance_shutdown(instance.primary_node, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance")
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))

    logging.info("Removing block devices for instance %s", instance.name)

    if not _RemoveDisks(self, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", instance.name)

    self.cfg.RemoveInstance(instance.name)
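    # remove_locks is presumably picked up by the LU machinery (mcpu) to
    # drop the now-stale instance lock once this LU finishes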
2860
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
2861

    
2862

    
2863
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state", "admin_ram",
                                    "disk_template", "ip", "mac", "bridge",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    "(disk).(size)/([0-9]+)",
                                    "(disk).(sizes)",
                                    "(nic).(mac|ip|bridge)/([0-9]+)",
                                    "(nic).(macs|ips|bridges)",
                                    "(disk|nic).(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")


  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.do_locking:
      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
    elif self.wanted != locking.ALL_SET:
      instance_names = self.wanted
      missing = set(instance_names).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some instances were removed before retrieving their data: %s"
          % missing)
    else:
      instance_names = all_info.keys()

    instance_names = utils.NiceSort(instance_names)
    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    if self.do_locking:
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          bad_nodes.append(name)
        # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = (instance.status != "down")
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
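        # "status" combines the admin setting with the live state: running
        # or ADMIN_down when they agree, ERROR_up or ERROR_down when they
        # disagree, and ERROR_nodedown when the primary node is unreachable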
        elif field == "status":
          if instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.status != "down":
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.status != "down":
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "bridges":
              val = [nic.bridge for nic in instance.nics]
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "bridge":
                  val = instance.nics[nic_idx].bridge
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, "Unhandled variable parameter"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output


class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, bep[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    if not self.rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        if instance.status == "up" and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    if not self.rpc.call_instance_shutdown(source_node, instance):
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding"
                             " anyway. Please make sure node %s is down",
                             instance.name, source_node, source_node)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.status == "up":
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      if not self.rpc.call_instance_start(target_node, instance, None):
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s." %
                                 (instance.name, target_node))


def _CreateBlockDevOnPrimary(lu, node, instance, device, info):
  """Create a tree of block devices on the primary node.

  This always creates all devices.

  """
  if device.children:
    for child in device.children:
      if not _CreateBlockDevOnPrimary(lu, node, instance, child, info):
        return False

  lu.cfg.SetDiskID(device, node)
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, True, info)
  if not new_id:
    return False
  if device.physical_id is None:
    device.physical_id = new_id
  return True


def _CreateBlockDevOnSecondary(lu, node, instance, device, force, info):
  """Create a tree of block devices on a secondary node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  """
  if device.CreateOnSecondary():
    force = True
  if device.children:
    for child in device.children:
      if not _CreateBlockDevOnSecondary(lu, node, instance,
                                        child, force, info):
        return False

  if not force:
    return True
  lu.cfg.SetDiskID(device, node)
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, False, info)
  if not new_id:
    return False
  if device.physical_id is None:
    device.physical_id = new_id
  return True


def _GenerateUniqueNames(lu, exts):
  """Generate a suitable set of LV names.

  This will generate one logical volume name per given suffix, each
  prefixed with a cluster-wide unique ID.

  """
  results = []
  for val in exts:
    new_id = lu.cfg.GenerateUniqueID()
    results.append("%s%s" % (new_id, val))
  return results


def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  shared_secret = lu.cfg.GenerateDRBDSecret()
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
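  # the DRBD8 logical_id ties together both endpoints, the allocated port,
  # the per-node minors and the shared secret; its children are the data LV
  # and the 128 MB metadata LV built above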
3284
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
3285
                          logical_id=(primary, secondary, port,
3286
                                      p_minor, s_minor,
3287
                                      shared_secret),
3288
                          children=[dev_data, dev_meta],
3289
                          iv_name=iv_name)
3290
  return drbd_dev
3291

    
3292

    
3293
def _GenerateDiskTemplate(lu, template_name,
3294
                          instance_name, primary_node,
3295
                          secondary_nodes, disk_info,
3296
                          file_storage_dir, file_driver,
3297
                          base_index):
3298
  """Generate the entire disk layout for a given template type.
3299

3300
  """
3301
  #TODO: compute space requirements
3302

    
3303
  vgname = lu.cfg.GetVGName()
3304
  disk_count = len(disk_info)
3305
  disks = []
3306
  if template_name == constants.DT_DISKLESS:
3307
    pass
3308
  elif template_name == constants.DT_PLAIN:
3309
    if len(secondary_nodes) != 0:
3310
      raise errors.ProgrammerError("Wrong template configuration")
3311

    
3312
    names = _GenerateUniqueNames(lu, [".disk%d" % i
3313
                                      for i in range(disk_count)])
3314
    for idx, disk in enumerate(disk_info):
3315
      disk_index = idx + base_index
3316
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
3317
                              logical_id=(vgname, names[idx]),
3318
                              iv_name="disk/%d" % disk_index)
3319
      disks.append(disk_dev)
3320
  elif template_name == constants.DT_DRBD8:
3321
    if len(secondary_nodes) != 1:
3322
      raise errors.ProgrammerError("Wrong template configuration")
3323
    remote_node = secondary_nodes[0]
3324
    minors = lu.cfg.AllocateDRBDMinor(
3325
      [primary_node, remote_node] * len(disk_info), instance_name)
3326

    
3327
    names = _GenerateUniqueNames(lu,
3328
                                 [".disk%d_%s" % (i, s)
3329
                                  for i in range(disk_count)
3330
                                  for s in ("data", "meta")
3331
                                  ])
3332
    for idx, disk in enumerate(disk_info):
3333
      disk_index = idx + base_index
3334
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
3335
                                      disk["size"], names[idx*2:idx*2+2],
3336
                                      "disk/%d" % disk_index,
3337
                                      minors[idx*2], minors[idx*2+1])
3338
      disks.append(disk_dev)
3339
  elif template_name == constants.DT_FILE:
3340
    if len(secondary_nodes) != 0:
3341
      raise errors.ProgrammerError("Wrong template configuration")
3342

    
3343
    for idx, disk in enumerate(disk_info):
3344
      disk_index = idx + base_index
3345
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
3346
                              iv_name="disk/%d" % disk_index,
3347
                              logical_id=(file_driver,
3348
                                          "%s/disk%d" % (file_storage_dir,
3349
                                                         idx)))
3350
      disks.append(disk_dev)
3351
  else:
3352
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
3353
  return disks
3354

    
3355

    
3356
def _GetInstanceInfoText(instance):
3357
  """Compute that text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name


def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @rtype: boolean
  @return: the success of the creation

  """
  info = _GetInstanceInfoText(instance)

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(instance.primary_node,
                                                 file_storage_dir)

    if not result:
      logging.error("Could not connect to node '%s'", instance.primary_node)
      return False

    if not result[0]:
      logging.error("Failed to create directory '%s'", file_storage_dir)
      return False

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for device in instance.disks:
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    for secondary_node in instance.secondary_nodes:
      if not _CreateBlockDevOnSecondary(lu, secondary_node, instance,
                                        device, False, info):
        logging.error("Failed to create volume %s (%s) on secondary node %s!",
                      device.iv_name, device, secondary_node)
        return False
    #HARDCODE
    if not _CreateBlockDevOnPrimary(lu, instance.primary_node,
                                    instance, device, info):
      logging.error("Failed to create volume %s on primary!", device.iv_name)
      return False

  return True


def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  result = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(disk, node)
      if not lu.rpc.call_blockdev_remove(node, disk):
        lu.proc.LogWarning("Could not remove block device %s on node %s,"
                           " continuing anyway", device.iv_name, node)
        result = False

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if not lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                               file_storage_dir):
      logging.error("Could not remove directory '%s'", file_storage_dir)
      result = False

  return result


def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group

  """
  # Required free disk space as a function of disk and swap space
  req_size_dict = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: sum(d["size"] for d in disks),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
    constants.DT_FILE: None,
  }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" %  disk_template)

  return req_size_dict[disk_template]


def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstracts the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
                                                  hvname,
                                                  hvparams)
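  # each node is expected to return a (status, message) pair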
  for node in nodenames:
    info = hvinfo.get(node, None)
    if not info or not isinstance(info, (tuple, list)):
      raise errors.OpPrereqError("Cannot get current information"
                                 " from node '%s' (%s)" % (node, info))
    if not info[0]:
      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
                                 " %s" % info[1])


class LUCreateInstance(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disks", "disk_template",
              "mode", "start",
              "wait_for_sync", "ip_check", "nics",
              "hvparams", "beparams"]
  REQ_BGL = False

  def _ExpandNode(self, node):
    """Expands and checks one node name.

    """
    node_full = self.cfg.ExpandNodeName(node)
    if node_full is None:
      raise errors.OpPrereqError("Unknown node %s" % node)
    return node_full

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)

    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)

    # fill and remember the beparams dict
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                    self.op.beparams)

    #### instance parameters check

    # instance name verification
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # NIC buildup
    self.nics = []
    for nic in self.op.nics:
      # ip validity checks
      ip = nic.get("ip", None)
      if ip is None or ip.lower() == "none":
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        nic_ip = hostname1.ip
      else:
        if not utils.IsValidIP(ip):
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
                                     " like a valid IP" % ip)
        nic_ip = ip

      # MAC address verification
      mac = nic.get("mac", constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        if not utils.IsValidMac(mac.lower()):
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                                     mac)
      # bridge verification
      bridge = nic.get("bridge", self.cfg.GetDefBridge())
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))

    # disk checks/pre-build
    self.disks = []
    for disk in self.op.disks:
      mode = disk.get("mode", constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                   mode)
      size = disk.get("size", None)
      if size is None:
        raise errors.OpPrereqError("Missing disk size")
      try:
        size = int(size)
      except ValueError:
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
      self.disks.append({"size": size, "mode": mode})

    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      self.op.src_node = src_node = self._ExpandNode(src_node)
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
        self.needed_locks[locking.LEVEL_NODE].append(src_node)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    nics = [n.ToDict() for n in self.nics]
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_ALLOC,
                     name=self.op.instance_name,
                     disk_template=self.op.disk_template,
                     tags=[],
                     os=self.op.os_type,
                     vcpus=self.be_full[constants.BE_VCPUS],
                     mem_size=self.be_full[constants.BE_MEMORY],
                     disks=self.disks,
                     nics=nics,
                     hypervisor=self.op.hypervisor,
                     )

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.pnode = ial.nodes[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 ", ".join(ial.nodes))
    if ial.required_nodes == 2:
      self.op.snode = ial.nodes[1]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": ",".join(str(d["size"]) for d in self.disks),
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGES"] = self.src_images

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=[(n.ip, n.bridge, n.mac) for n in self.nics],
    ))

    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl


  def CheckPrereq(self):
    """Check prerequisites.

    """
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")


    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      export_info = self.rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # Check that the new instance doesn't have less disks than the export
      instance_disks = len(self.disks)
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (instance_disks, export_disks))

      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = os.path.join(src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      old_name = export_info.get(constants.INISECT_INS, 'name')
      # FIXME: int() here could throw a ValueError on broken exports
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
      if self.op.instance_name == old_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
            nic_mac_ini = 'nic%d_mac' % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.disks)

    # Check lv size requirements
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo.get(node, None)
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    os_obj = self.rpc.call_os_get(pnode.name, self.op.os_type)
    if not os_obj:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # bridge check on primary node
    bridges = [n.bridge for n in self.nics]
    if not self.rpc.call_bridges_exist(self.pnode.name, bridges):
      raise errors.OpPrereqError("one of the target bridges '%s' does not"
                                 " exist on"
                                 " destination node '%s'" %
                                 (",".join(bridges), pnode.name))

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC()

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    ##if self.op.vnc_bind_address is None:
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance))


    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries,
                                  self.disks,
                                  file_storage_dir,
                                  self.op.file_driver,
                                  0)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self, iobj):
      _RemoveDisks(self, iobj)
      self.cfg.ReleaseDRBDMinors(instance)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Remove the temp. assignments for the instance's drbds
    self.cfg.ReleaseDRBDMinors(instance)
    # Unlock all the nodes
    self.context.glm.release(locking.LEVEL_NODE)
    del self.acquired_locks[locking.LEVEL_NODE]

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not self.rpc.call_instance_os_add(pnode_name, iobj):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_images = self.src_images
        cluster_name = self.cfg.GetClusterName()
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
                                                         src_node, src_images,
                                                         cluster_name)
        for idx, result in enumerate(import_result):
          if not result:
            self.LogWarning("Could not import the image %s for instance"
                            " %s, disk %d, on node %s" %
                            (src_images[idx], instance, idx, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      if not self.rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")


class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    instance = self.instance
    node = instance.primary_node

    node_insts = self.rpc.call_instance_list([node],
                                             [instance.hypervisor])[node]
    if node_insts is False:
      raise errors.OpExecError("Can't connect to node %s." % node)

    if instance.name not in node_insts:
      raise errors.OpExecError("Instance %s is not running." % instance.name)

    logging.debug("Connecting to console of %s on %s", instance.name, node)

    hyper = hypervisor.GetHypervisor(instance.hypervisor)
    console_cmd = hyper.GetShellCommandForConsole(instance)

    # build ssh cmdline
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)


class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mode", "disks"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if not hasattr(self.op, "remote_node"):
      self.op.remote_node = None

    ia_name = getattr(self.op, "iallocator", None)
    if ia_name is not None:
      if self.op.remote_node is not None:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    elif self.op.remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.op.remote_node = remote_node
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # If we're not already locking all nodes in the set we have to declare the
    # instance's primary/secondary nodes.
    if (level == locking.LEVEL_NODE and
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
      self._LockInstancesNodes()

  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    """
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    self.LogInfo("Selected new secondary for the instance: %s",
                 self.op.remote_node)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    if self.op.remote_node is not None:
      nl.append(self.op.remote_node)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    ia_name = getattr(self.op, "iallocator", None)
    if ia_name is not None:
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # this is for DRBD8, where we can't execute the same mode of
        # replacement as for drbd7 (no different port allocated)
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
    if instance.disk_template == constants.DT_DRBD8:
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          remote_node is not None):
        # switch to replace secondary mode
        self.op.mode = constants.REPLACE_DISK_SEC

      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    if not self.op.disks:
      self.op.disks = range(len(instance.disks))

    for disk_idx in self.op.disks:
      instance.FindDisk(disk_idx)

  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for dbrd8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaced.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking disk/%d on %s" % (idx, node))
        cfg.SetDiskID(dev, node)
        if not self.rpc.call_blockdev_find(node, dev):
          raise errors.OpExecError("Can't find disk/%d on node %s" %
                                   (idx, node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, oth_node))
      if not _CheckDiskConsistency(self, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".disk%d_%s" % (idx, suf)
                  for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(self, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in new_lvs:
        if not _CreateBlockDevOnPrimary(self, tgt_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], tgt_node))

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      if not self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
4307
      #dev.children = []
4308
      #cfg.Update(instance)
4309

    
4310
      # ok, we created the new LVs, so now we know we have the needed
4311
      # storage; as such, we proceed on the target node to rename
4312
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
4313
      # using the assumption that logical_id == physical_id (which in
4314
      # turn is the unique_id on that node)
4315

    
4316
      # FIXME(iustin): use a better name for the replaced LVs
4317
      temp_suffix = int(time.time())
4318
      ren_fn = lambda d, suff: (d.physical_id[0],
4319
                                d.physical_id[1] + "_replaced-%s" % suff)
4320
      # build the rename list based on what LVs exist on the node
4321
      rlist = []
4322
      for to_ren in old_lvs:
4323
        find_res = self.rpc.call_blockdev_find(tgt_node, to_ren)
4324
        if find_res is not None: # device exists
4325
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
4326

    
4327
      info("renaming the old LVs on the target node")
4328
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
4329
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
4330
      # now we rename the new LVs to the old LVs
4331
      info("renaming the new LVs on the target node")
4332
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
4333
      if not self.rpc.call_blockdev_rename(tgt_node, rlist):
4334
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
4335

    
4336
      for old, new in zip(old_lvs, new_lvs):
4337
        new.logical_id = old.logical_id
4338
        cfg.SetDiskID(new, tgt_node)
4339

    
4340
      for disk in old_lvs:
4341
        disk.logical_id = ren_fn(disk, temp_suffix)
4342
        cfg.SetDiskID(disk, tgt_node)
4343

    
4344
      # now that the new lvs have the old name, we can add them to the device
4345
      info("adding new mirror component on %s" % tgt_node)
4346
      if not self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
4347
        for new_lv in new_lvs:
4348
          if not self.rpc.call_blockdev_remove(tgt_node, new_lv):
4349
            warning("Can't rollback device %s", hint="manually cleanup unused"
4350
                    " logical volumes")
4351
        raise errors.OpExecError("Can't add local storage to drbd")
4352

    
4353
      dev.children = new_lvs
4354
      cfg.Update(instance)
4355

    
4356
    # Step: wait for sync
4357

    
4358
    # this can fail as the old devices are degraded and _WaitForSync
4359
    # does a combined result over all disks, so we don't check its
4360
    # return value
4361
    self.proc.LogStep(5, steps_total, "sync devices")
4362
    _WaitForSync(self, instance, unlock=True)
4363

    
4364
    # so check manually all the devices
4365
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4366
      cfg.SetDiskID(dev, instance.primary_node)
4367
      is_degr = self.rpc.call_blockdev_find(instance.primary_node, dev)[5]
4368
      if is_degr:
4369
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4370

    
4371
    # Step: remove old storage
4372
    self.proc.LogStep(6, steps_total, "removing old storage")
4373
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4374
      info("remove logical volumes for %s" % name)
4375
      for lv in old_lvs:
4376
        cfg.SetDiskID(lv, tgt_node)
4377
        if not self.rpc.call_blockdev_remove(tgt_node, lv):
4378
          warning("Can't remove old LV", hint="manually remove unused LVs")
4379
          continue
4380

    
4381
  def _ExecD8Secondary(self, feedback_fn):
4382
    """Replace the secondary node for drbd8.
4383

4384
    The algorithm for replace is quite complicated:
4385
      - for all disks of the instance:
4386
        - create new LVs on the new node with same names
4387
        - shutdown the drbd device on the old secondary
4388
        - disconnect the drbd network on the primary
4389
        - create the drbd device on the new secondary
4390
        - network attach the drbd on the primary, using an artifice:
4391
          the drbd code for Attach() will connect to the network if it
4392
          finds a device which is connected to the correct local disks but
4393
          not network enabled
4394
      - wait for sync across all devices
4395
      - remove all disks from the old secondary
4396

4397
    Failures are not very well handled.
4398

4399
    """
4400
    steps_total = 6
4401
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4402
    instance = self.instance
4403
    iv_names = {}
4404
    vgname = self.cfg.GetVGName()
4405
    # start of work
4406
    cfg = self.cfg
4407
    old_node = self.tgt_node
4408
    new_node = self.new_node
4409
    pri_node = instance.primary_node
4410

    
4411
    # Step: check device activation
4412
    self.proc.LogStep(1, steps_total, "check device existence")
4413
    info("checking volume groups")
4414
    my_vg = cfg.GetVGName()
4415
    results = self.rpc.call_vg_list([pri_node, new_node])
4416
    if not results:
4417
      raise errors.OpExecError("Can't list volume groups on the nodes")
4418
    for node in pri_node, new_node:
4419
      res = results.get(node, False)
4420
      if not res or my_vg not in res:
4421
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4422
                                 (my_vg, node))
4423
    for idx, dev in enumerate(instance.disks):
4424
      if idx not in self.op.disks:
4425
        continue
4426
      info("checking disk/%d on %s" % (idx, pri_node))
4427
      cfg.SetDiskID(dev, pri_node)
4428
      if not self.rpc.call_blockdev_find(pri_node, dev):
4429
        raise errors.OpExecError("Can't find disk/%d on node %s" %
4430
                                 (idx, pri_node))
4431

    
4432
    # Step: check other node consistency
4433
    self.proc.LogStep(2, steps_total, "check peer consistency")
4434
    for idx, dev in enumerate(instance.disks):
4435
      if idx not in self.op.disks:
4436
        continue
4437
      info("checking disk/%d consistency on %s" % (idx, pri_node))
4438
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
4439
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
4440
                                 " unsafe to replace the secondary" %
4441
                                 pri_node)
4442

    
4443
    # Step: create new storage
4444
    self.proc.LogStep(3, steps_total, "allocate new storage")
4445
    for idx, dev in enumerate(instance.disks):
4446
      size = dev.size
4447
      info("adding new local storage on %s for disk/%d" %
4448
           (new_node, idx))
4449
      # since we *always* want to create this LV, we use the
4450
      # _Create...OnPrimary (which forces the creation), even if we
4451
      # are talking about the secondary node
4452
      for new_lv in dev.children:
4453
        if not _CreateBlockDevOnPrimary(self, new_node, instance, new_lv,
4454
                                        _GetInstanceInfoText(instance)):
4455
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4456
                                   " node '%s'" %
4457
                                   (new_lv.logical_id[1], new_node))
4458

    
4459
    # Step 4: drbd minors and drbd setup changes
4460
    # after this, we must manually remove the drbd minors on both the
4461
    # error and the success paths
4462
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
4463
                                   instance.name)
4464
    logging.debug("Allocated minors %s" % (minors,))
4465
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
4466
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
4467
      size = dev.size
4468
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
4469
      # create new devices on new_node
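      # The drbd8 logical_id is assumed here to be the 6-tuple
      # (node_a, node_b, port, minor_a, minor_b, secret); the code below keeps
      # the port and secret, swaps in the new secondary node and gives it the
      # freshly allocated minor while preserving the primary's entry.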
4470
      if pri_node == dev.logical_id[0]:
4471
        new_logical_id = (pri_node, new_node,
4472
                          dev.logical_id[2], dev.logical_id[3], new_minor,
4473
                          dev.logical_id[5])
4474
      else:
4475
        new_logical_id = (new_node, pri_node,
4476
                          dev.logical_id[2], new_minor, dev.logical_id[4],
4477
                          dev.logical_id[5])
4478
      iv_names[idx] = (dev, dev.children, new_logical_id)
4479
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
4480
                    new_logical_id)
4481
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
4482
                              logical_id=new_logical_id,
4483
                              children=dev.children)
4484
      if not _CreateBlockDevOnSecondary(self, new_node, instance,
4485
                                        new_drbd, False,
4486
                                        _GetInstanceInfoText(instance)):
4487
        self.cfg.ReleaseDRBDMinors(instance.name)
4488
        raise errors.OpExecError("Failed to create new DRBD on"
4489
                                 " node '%s'" % new_node)
4490

    
4491
    for idx, dev in enumerate(instance.disks):
4492
      # we have new devices, shutdown the drbd on the old secondary
4493
      info("shutting down drbd for disk/%d on old node" % idx)
4494
      cfg.SetDiskID(dev, old_node)
4495
      if not self.rpc.call_blockdev_shutdown(old_node, dev):
4496
        warning("Failed to shutdown drbd for disk/%d on old node" % idx,
4497
                hint="Please cleanup this device manually as soon as possible")
4498

    
4499
    info("detaching primary drbds from the network (=> standalone)")
4500
    done = 0
4501
    for idx, dev in enumerate(instance.disks):
4502
      cfg.SetDiskID(dev, pri_node)
4503
      # set the network part of the physical (unique in bdev terms) id
4504
      # to None, meaning detach from network
4505
      dev.physical_id = (None, None, None, None) + dev.physical_id[4:]
4506
      # and 'find' the device, which will 'fix' it to match the
4507
      # standalone state
4508
      if self.rpc.call_blockdev_find(pri_node, dev):
4509
        done += 1
4510
      else:
4511
        warning("Failed to detach drbd disk/%d from network, unusual case" %
4512
                idx)
4513

    
4514
    if not done:
4515
      # no detaches succeeded (very unlikely)
4516
      self.cfg.ReleaseDRBDMinors(instance.name)
4517
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
4518

    
4519
    # if we managed to detach at least one, we update all the disks of
4520
    # the instance to point to the new secondary
4521
    info("updating instance configuration")
4522
    for dev, _, new_logical_id in iv_names.itervalues():
4523
      dev.logical_id = new_logical_id
4524
      cfg.SetDiskID(dev, pri_node)
4525
    cfg.Update(instance)
4526
    # we can remove now the temp minors as now the new values are
4527
    # written to the config file (and therefore stable)
4528
    self.cfg.ReleaseDRBDMinors(instance.name)
4529

    
4530
    # and now perform the drbd attach
4531
    info("attaching primary drbds to new secondary (standalone => connected)")
4532
    failures = []
4533
    for idx, dev in enumerate(instance.disks):
4534
      info("attaching primary drbd for disk/%d to new secondary node" % idx)
4535
      # since the attach is smart, it's enough to 'find' the device,
4536
      # it will automatically activate the network, if the physical_id
4537
      # is correct
4538
      cfg.SetDiskID(dev, pri_node)
4539
      logging.debug("Disk to attach: %s", dev)
4540
      if not self.rpc.call_blockdev_find(pri_node, dev):
4541
        warning("can't attach drbd disk/%d to new secondary!" % idx,
4542
                "please do a gnt-instance info to see the status of disks")
4543

    
4544
    # this can fail as the old devices are degraded and _WaitForSync
4545
    # does a combined result over all disks, so we don't check its
4546
    # return value
4547
    self.proc.LogStep(5, steps_total, "sync devices")
4548
    _WaitForSync(self, instance, unlock=True)
4549

    
4550
    # so check manually all the devices
4551
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
4552
      cfg.SetDiskID(dev, pri_node)
4553
      is_degr = self.rpc.call_blockdev_find(pri_node, dev)[5]
4554
      if is_degr:
4555
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
4556

    
4557
    self.proc.LogStep(6, steps_total, "removing old storage")
4558
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
4559
      info("remove logical volumes for disk/%d" % idx)
4560
      for lv in old_lvs:
4561
        cfg.SetDiskID(lv, old_node)
4562
        if not self.rpc.call_blockdev_remove(old_node, lv):
4563
          warning("Can't remove LV on old secondary",
4564
                  hint="Cleanup stale volumes by hand")
4565

    
4566
  def Exec(self, feedback_fn):
4567
    """Execute disk replacement.
4568

4569
    This dispatches the disk replacement to the appropriate handler.
4570

4571
    """
4572
    instance = self.instance
4573

    
4574
    # Activate the instance disks if we're replacing them on a down instance
4575
    if instance.status == "down":
4576
      _StartInstanceDisks(self, instance, True)
4577

    
4578
    if instance.disk_template == constants.DT_DRBD8:
4579
      if self.op.remote_node is None:
4580
        fn = self._ExecD8DiskOnly
4581
      else:
4582
        fn = self._ExecD8Secondary
4583
    else:
4584
      raise errors.ProgrammerError("Unhandled disk replacement case")
4585

    
4586
    ret = fn(feedback_fn)
4587

    
4588
    # Deactivate the instance disks if we're replacing them on a down instance
4589
    if instance.status == "down":
4590
      _SafeShutdownInstanceDisks(self, instance)
4591

    
4592
    return ret
4593

    
4594

    
4595
class LUGrowDisk(LogicalUnit):
4596
  """Grow a disk of an instance.
4597

4598
  """
4599
  HPATH = "disk-grow"
4600
  HTYPE = constants.HTYPE_INSTANCE
4601
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
4602
  REQ_BGL = False
4603

    
4604
  def ExpandNames(self):
4605
    self._ExpandAndLockInstance()
4606
    self.needed_locks[locking.LEVEL_NODE] = []
4607
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4608

    
4609
  def DeclareLocks(self, level):
4610
    if level == locking.LEVEL_NODE:
4611
      self._LockInstancesNodes()
4612

    
4613
  def BuildHooksEnv(self):
4614
    """Build hooks env.
4615

4616
    This runs on the master, the primary and all the secondaries.
4617

4618
    """
4619
    env = {
4620
      "DISK": self.op.disk,
4621
      "AMOUNT": self.op.amount,
4622
      }
4623
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4624
    nl = [
4625
      self.cfg.GetMasterNode(),
4626
      self.instance.primary_node,
4627
      ]
4628
    return env, nl, nl
4629

    
4630
  def CheckPrereq(self):
4631
    """Check prerequisites.
4632

4633
    This checks that the instance is in the cluster.
4634

4635
    """
4636
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4637
    assert instance is not None, \
4638
      "Cannot retrieve locked instance %s" % self.op.instance_name
4639

    
4640
    self.instance = instance
4641

    
4642
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
4643
      raise errors.OpPrereqError("Instance's disk layout does not support"
4644
                                 " growing.")
4645

    
4646
    self.disk = instance.FindDisk(self.op.disk)
4647

    
4648
    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
4649
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
4650
                                       instance.hypervisor)
4651
    for node in nodenames:
4652
      info = nodeinfo.get(node, None)
4653
      if not info:
4654
        raise errors.OpPrereqError("Cannot get current information"
4655
                                   " from node '%s'" % node)
4656
      vg_free = info.get('vg_free', None)
4657
      if not isinstance(vg_free, int):
4658
        raise errors.OpPrereqError("Can't compute free disk space on"
4659
                                   " node %s" % node)
4660
      if self.op.amount > info['vg_free']:
4661
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
4662
                                   " %d MiB available, %d MiB required" %
4663
                                   (node, info['vg_free'], self.op.amount))
4664

    
4665
  def Exec(self, feedback_fn):
4666
    """Execute disk grow.
4667

4668
    """
4669
    instance = self.instance
4670
    disk = self.disk
4671
    for node in (instance.secondary_nodes + (instance.primary_node,)):
4672
      self.cfg.SetDiskID(disk, node)
4673
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
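      # call_blockdev_grow is expected to return a (success, message) pair;
      # anything else is treated below as a failed grow on that node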
4674
      if (not result or not isinstance(result, (list, tuple)) or
4675
          len(result) != 2):
4676
        raise errors.OpExecError("grow request failed to node %s" % node)
4677
      elif not result[0]:
4678
        raise errors.OpExecError("grow request failed to node %s: %s" %
4679
                                 (node, result[1]))
4680
    disk.RecordGrow(self.op.amount)
4681
    self.cfg.Update(instance)
4682
    if self.op.wait_for_sync:
4683
      disk_abort = not _WaitForSync(self, instance)
4684
      if disk_abort:
4685
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
4686
                             " status.\nPlease check the instance.")
4687

    
4688

    
4689
class LUQueryInstanceData(NoHooksLU):
4690
  """Query runtime instance data.
4691

4692
  """
4693
  _OP_REQP = ["instances", "static"]
4694
  REQ_BGL = False
4695

    
4696
  def ExpandNames(self):
4697
    self.needed_locks = {}
4698
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
4699

    
4700
    if not isinstance(self.op.instances, list):
4701
      raise errors.OpPrereqError("Invalid argument type 'instances'")
4702

    
4703
    if self.op.instances:
4704
      self.wanted_names = []
4705
      for name in self.op.instances:
4706
        full_name = self.cfg.ExpandInstanceName(name)
4707
        if full_name is None:
4708
          raise errors.OpPrereqError("Instance '%s' not known" %
4709
                                     name)
4710
        self.wanted_names.append(full_name)
4711
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
4712
    else:
4713
      self.wanted_names = None
4714
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
4715

    
4716
    self.needed_locks[locking.LEVEL_NODE] = []
4717
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4718

    
4719
  def DeclareLocks(self, level):
4720
    if level == locking.LEVEL_NODE:
4721
      self._LockInstancesNodes()
4722

    
4723
  def CheckPrereq(self):
4724
    """Check prerequisites.
4725

4726
    This only checks the optional instance list against the existing names.
4727

4728
    """
4729
    if self.wanted_names is None:
4730
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
4731

    
4732
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
4733
                             in self.wanted_names]
4734
    return
4735

    
4736
  def _ComputeDiskStatus(self, instance, snode, dev):
4737
    """Compute block device status.
4738

4739
    """
4740
    static = self.op.static
4741
    if not static:
4742
      self.cfg.SetDiskID(dev, instance.primary_node)
4743
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
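      # dev_pstatus/dev_sstatus hold whatever call_blockdev_find returns;
      # elsewhere in this module the sixth element of that result is used as
      # the "degraded" flag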
4744
    else:
4745
      dev_pstatus = None
4746

    
4747
    if dev.dev_type in constants.LDS_DRBD:
4748
      # we change the snode then (otherwise we use the one passed in)
4749
      if dev.logical_id[0] == instance.primary_node:
4750
        snode = dev.logical_id[1]
4751
      else:
4752
        snode = dev.logical_id[0]
4753

    
4754
    if snode and not static:
4755
      self.cfg.SetDiskID(dev, snode)
4756
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
4757
    else:
4758
      dev_sstatus = None
4759

    
4760
    if dev.children:
4761
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
4762
                      for child in dev.children]
4763
    else:
4764
      dev_children = []
4765

    
4766
    data = {
4767
      "iv_name": dev.iv_name,
4768
      "dev_type": dev.dev_type,
4769
      "logical_id": dev.logical_id,
4770
      "physical_id": dev.physical_id,
4771
      "pstatus": dev_pstatus,
4772
      "sstatus": dev_sstatus,
4773
      "children": dev_children,
4774
      "mode": dev.mode,
4775
      }
4776

    
4777
    return data
4778

    
4779
  def Exec(self, feedback_fn):
4780
    """Gather and return data"""
4781
    result = {}
4782

    
4783
    cluster = self.cfg.GetClusterInfo()
4784

    
4785
    for instance in self.wanted_instances:
4786
      if not self.op.static:
4787
        remote_info = self.rpc.call_instance_info(instance.primary_node,
4788
                                                  instance.name,
4789
                                                  instance.hypervisor)
4790
        if remote_info and "state" in remote_info:
4791
          remote_state = "up"
4792
        else:
4793
          remote_state = "down"
4794
      else:
4795
        remote_state = None
4796
      if instance.status == "down":
4797
        config_state = "down"
4798
      else:
4799
        config_state = "up"
4800

    
4801
      disks = [self._ComputeDiskStatus(instance, None, device)
4802
               for device in instance.disks]
4803

    
4804
      idict = {
4805
        "name": instance.name,
4806
        "config_state": config_state,
4807
        "run_state": remote_state,
4808
        "pnode": instance.primary_node,
4809
        "snodes": instance.secondary_nodes,
4810
        "os": instance.os,
4811
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
4812
        "disks": disks,
4813
        "hypervisor": instance.hypervisor,
4814
        "network_port": instance.network_port,
4815
        "hv_instance": instance.hvparams,
4816
        "hv_actual": cluster.FillHV(instance),
4817
        "be_instance": instance.beparams,
4818
        "be_actual": cluster.FillBE(instance),
4819
        }
4820

    
4821
      result[instance.name] = idict
4822

    
4823
    return result
4824

    
4825

    
4826
class LUSetInstanceParams(LogicalUnit):
4827
  """Modifies an instances's parameters.
4828

4829
  """
4830
  HPATH = "instance-modify"
4831
  HTYPE = constants.HTYPE_INSTANCE
4832
  _OP_REQP = ["instance_name"]
4833
  REQ_BGL = False
4834

    
4835
  def CheckArguments(self):
4836
    if not hasattr(self.op, 'nics'):
4837
      self.op.nics = []
4838
    if not hasattr(self.op, 'disks'):
4839
      self.op.disks = []
4840
    if not hasattr(self.op, 'beparams'):
4841
      self.op.beparams = {}
4842
    if not hasattr(self.op, 'hvparams'):
4843
      self.op.hvparams = {}
4844
    self.op.force = getattr(self.op, "force", False)
4845
    if not (self.op.nics or self.op.disks or
4846
            self.op.hvparams or self.op.beparams):
4847
      raise errors.OpPrereqError("No changes submitted")
4848

    
4849
    for item in (constants.BE_MEMORY, constants.BE_VCPUS):
4850
      val = self.op.beparams.get(item, None)
4851
      if val is not None:
4852
        try:
4853
          val = int(val)
4854
        except ValueError, err:
4855
          raise errors.OpPrereqError("Invalid %s size: %s" % (item, str(err)))
4856
        self.op.beparams[item] = val
4857
    # Disk validation
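    # self.op.disks is a list of (operation, parameters) pairs; illustrative
    # examples only:
    #   [(constants.DDM_ADD, {'size': 1024, 'mode': constants.DISK_RDWR})]
    #   [(constants.DDM_REMOVE, {})]
    #   [(0, {'mode': constants.DISK_RDONLY})]   # modify disk index 0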
4858
    disk_addremove = 0
4859
    for disk_op, disk_dict in self.op.disks:
4860
      if disk_op == constants.DDM_REMOVE:
4861
        disk_addremove += 1
4862
        continue
4863
      elif disk_op == constants.DDM_ADD:
4864
        disk_addremove += 1
4865
      else:
4866
        if not isinstance(disk_op, int):
4867
          raise errors.OpPrereqError("Invalid disk index")
4868
      if disk_op == constants.DDM_ADD:
4869
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
4870
        if mode not in (constants.DISK_RDONLY, constants.DISK_RDWR):
4871
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
4872
        size = disk_dict.get('size', None)
4873
        if size is None:
4874
          raise errors.OpPrereqError("Required disk parameter size missing")
4875
        try:
4876
          size = int(size)
4877
        except ValueError, err:
4878
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
4879
                                     str(err))
4880
        disk_dict['size'] = size
4881
      else:
4882
        # modification of disk
4883
        if 'size' in disk_dict:
4884
          raise errors.OpPrereqError("Disk size change not possible, use"
4885
                                     " grow-disk")
4886

    
4887
    if disk_addremove > 1:
4888
      raise errors.OpPrereqError("Only one disk add or remove operation"
4889
                                 " supported at a time")
4890

    
4891
    # NIC validation
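    # self.op.nics uses the same (operation, parameters) convention, e.g.
    # (illustrative values): [(constants.DDM_ADD, {'ip': 'none'})] or
    # [(1, {'mac': '00:11:22:33:44:55'})] to modify NIC 1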
4892
    nic_addremove = 0
4893
    for nic_op, nic_dict in self.op.nics:
4894
      if nic_op == constants.DDM_REMOVE:
4895
        nic_addremove += 1
4896
        continue
4897
      elif nic_op == constants.DDM_ADD:
4898
        nic_addremove += 1
4899
      else:
4900
        if not isinstance(nic_op, int):
4901
          raise errors.OpPrereqError("Invalid nic index")
4902

    
4903
      # nic_dict should be a dict
4904
      nic_ip = nic_dict.get('ip', None)
4905
      if nic_ip is not None:
4906
        if nic_ip.lower() == "none":
4907
          nic_dict['ip'] = None
4908
        else:
4909
          if not utils.IsValidIP(nic_ip):
4910
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
4911
      # we can only check None bridges and assign the default one
4912
      nic_bridge = nic_dict.get('bridge', None)
4913
      if nic_bridge is None:
4914
        nic_dict['bridge'] = self.cfg.GetDefBridge()
4915
      # but we can validate MACs
4916
      nic_mac = nic_dict.get('mac', None)
4917
      if nic_mac is not None:
4918
        if self.cfg.IsMacInUse(nic_mac):
4919
          raise errors.OpPrereqError("MAC address %s already in use"
4920
                                     " in cluster" % nic_mac)
4921
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4922
          if not utils.IsValidMac(nic_mac):
4923
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
4924
    if nic_addremove > 1:
4925
      raise errors.OpPrereqError("Only one NIC add or remove operation"
4926
                                 " supported at a time")
4927

    
4928
  def ExpandNames(self):
4929
    self._ExpandAndLockInstance()
4930
    self.needed_locks[locking.LEVEL_NODE] = []
4931
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4932

    
4933
  def DeclareLocks(self, level):
4934
    if level == locking.LEVEL_NODE:
4935
      self._LockInstancesNodes()
4936

    
4937
  def BuildHooksEnv(self):
4938
    """Build hooks env.
4939

4940
    This runs on the master, primary and secondaries.
4941

4942
    """
4943
    args = dict()
4944
    if constants.BE_MEMORY in self.be_new:
4945
      args['memory'] = self.be_new[constants.BE_MEMORY]
4946
    if constants.BE_VCPUS in self.be_new:
4947
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
4948
    # FIXME: readd disk/nic changes
4949
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
4950
    nl = [self.cfg.GetMasterNode(),
4951
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4952
    return env, nl, nl
4953

    
4954
  def CheckPrereq(self):
4955
    """Check prerequisites.
4956

4957
    This only checks the instance list against the existing names.
4958

4959
    """
4960
    force = self.force = self.op.force
4961

    
4962
    # checking the new params on the primary/secondary nodes
4963

    
4964
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4965
    assert self.instance is not None, \
4966
      "Cannot retrieve locked instance %s" % self.op.instance_name
4967
    pnode = self.instance.primary_node
4968
    nodelist = [pnode]
4969
    nodelist.extend(instance.secondary_nodes)
4970

    
4971
    # hvparams processing
4972
    if self.op.hvparams:
4973
      i_hvdict = copy.deepcopy(instance.hvparams)
4974
      for key, val in self.op.hvparams.iteritems():
4975
        if val is None:
4976
          try:
4977
            del i_hvdict[key]
4978
          except KeyError:
4979
            pass
4980
        else:
4981
          i_hvdict[key] = val
4982
      cluster = self.cfg.GetClusterInfo()
4983
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
4984
                                i_hvdict)
4985
      # local check
4986
      hypervisor.GetHypervisor(
4987
        instance.hypervisor).CheckParameterSyntax(hv_new)
4988
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
4989
      self.hv_new = hv_new # the new actual values
4990
      self.hv_inst = i_hvdict # the new dict (without defaults)
4991
    else:
4992
      self.hv_new = self.hv_inst = {}
4993

    
4994
    # beparams processing
4995
    if self.op.beparams:
4996
      i_bedict = copy.deepcopy(instance.beparams)
4997
      for key, val in self.op.beparams.iteritems():
4998
        if val is None:
4999
          try:
5000
            del i_bedict[key]
5001
          except KeyError:
5002
            pass
5003
        else:
5004
          i_bedict[key] = val
5005
      cluster = self.cfg.GetClusterInfo()
5006
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
5007
                                i_bedict)
5008
      self.be_new = be_new # the new actual values
5009
      self.be_inst = i_bedict # the new dict (without defaults)
5010
    else:
5011
      self.be_new = self.be_inst = {}
5012

    
5013
    self.warn = []
5014

    
5015
    if constants.BE_MEMORY in self.op.beparams and not self.force:
5016
      mem_check_list = [pnode]
5017
      if be_new[constants.BE_AUTO_BALANCE]:
5018
        # either we changed auto_balance to yes or it was from before
5019
        mem_check_list.extend(instance.secondary_nodes)
5020
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
5021
                                                  instance.hypervisor)
5022
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
5023
                                         instance.hypervisor)
5024

    
5025
      if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
5026
        # Assume the primary node is unreachable and go ahead
5027
        self.warn.append("Can't get info from primary node %s" % pnode)
5028
      else:
5029
        if instance_info:
5030
          current_mem = instance_info['memory']
5031
        else:
5032
          # Assume instance not running
5033
          # (there is a slight race condition here, but it's not very probable,
5034
          # and we have no other way to check)
5035
          current_mem = 0
5036
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
5037
                    nodeinfo[pnode]['memory_free'])
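        # e.g. (illustrative): asking for 2048 MB while the instance currently
        # uses 512 MB and the primary node has 1024 MB free gives
        # miss_mem = 2048 - 512 - 1024 = 512 > 0, so the change is rejected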
5038
        if miss_mem > 0:
5039
          raise errors.OpPrereqError("This change will prevent the instance"
5040
                                     " from starting, due to %d MB of memory"
5041
                                     " missing on its primary node" % miss_mem)
5042

    
5043
      if be_new[constants.BE_AUTO_BALANCE]:
5044
        for node in instance.secondary_nodes:
5045
          if node not in nodeinfo or not isinstance(nodeinfo[node], dict):
5046
            self.warn.append("Can't get info from secondary node %s" % node)
5047
          elif be_new[constants.BE_MEMORY] > nodeinfo[node]['memory_free']:
5048
            self.warn.append("Not enough memory to failover instance to"
5049
                             " secondary node %s" % node)
5050

    
5051
    # NIC processing
5052
    for nic_op, nic_dict in self.op.nics:
5053
      if nic_op == constants.DDM_REMOVE:
5054
        if not instance.nics:
5055
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
5056
        continue
5057
      if nic_op != constants.DDM_ADD:
5058
        # an existing nic
5059
        if nic_op < 0 or nic_op >= len(instance.nics):
5060
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
5061
                                     " are 0 to %d" %
5062
                                     (nic_op, len(instance.nics)))
5063
      nic_bridge = nic_dict.get('bridge', None)
5064
      if nic_bridge is not None:
5065
        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
5066
          msg = ("Bridge '%s' doesn't exist on one of"
5067
                 " the instance nodes" % nic_bridge)
5068
          if self.force:
5069
            self.warn.append(msg)
5070
          else:
5071
            raise errors.OpPrereqError(msg)
5072

    
5073
    # DISK processing
5074
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
5075
      raise errors.OpPrereqError("Disk operations not supported for"
5076
                                 " diskless instances")
5077
    for disk_op, disk_dict in self.op.disks:
5078
      if disk_op == constants.DDM_REMOVE:
5079
        if len(instance.disks) == 1:
5080
          raise errors.OpPrereqError("Cannot remove the last disk of"
5081
                                     " an instance")
5082
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
5083
        ins_l = ins_l[pnode]
5084
        if not isinstance(ins_l, list):
5085
          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
5086
        if instance.name in ins_l:
5087
          raise errors.OpPrereqError("Instance is running, can't remove"
5088
                                     " disks.")
5089

    
5090
      if (disk_op == constants.DDM_ADD and
5091
          len(instance.disks) >= constants.MAX_DISKS):
5092
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
5093
                                   " add more" % constants.MAX_DISKS)
5094
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
5095
        # an existing disk
5096
        if disk_op < 0 or disk_op >= len(instance.disks):
5097
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
5098
                                     " are 0 to %d" %
5099
                                     (disk_op, len(instance.disks)))
5100

    
5101
    return
5102

    
5103
  def Exec(self, feedback_fn):
5104
    """Modifies an instance.
5105

5106
    All parameters take effect only at the next restart of the instance.
5107

5108
    """
5109
    # Process here the warnings from CheckPrereq, as we don't have a
5110
    # feedback_fn there.
5111
    for warn in self.warn:
5112
      feedback_fn("WARNING: %s" % warn)
5113

    
5114
    result = []
5115
    instance = self.instance
5116
    # disk changes
5117
    for disk_op, disk_dict in self.op.disks:
5118
      if disk_op == constants.DDM_REMOVE:
5119
        # remove the last disk
5120
        device = instance.disks.pop()
5121
        device_idx = len(instance.disks)
5122
        for node, disk in device.ComputeNodeTree(instance.primary_node):
5123
          self.cfg.SetDiskID(disk, node)
5124
          if not self.rpc.call_blockdev_remove(node, disk):
5125
            self.proc.LogWarning("Could not remove disk/%d on node %s,"
5126
                                 " continuing anyway", device_idx, node)
5127
        result.append(("disk/%d" % device_idx, "remove"))
5128
      elif disk_op == constants.DDM_ADD:
5129
        # add a new disk
5130
        if instance.disk_template == constants.DT_FILE:
5131
          file_driver, file_path = instance.disks[0].logical_id
5132
          file_path = os.path.dirname(file_path)
5133
        else:
5134
          file_driver = file_path = None
5135
        disk_idx_base = len(instance.disks)
5136
        new_disk = _GenerateDiskTemplate(self,
5137
                                         instance.disk_template,
5138
                                         instance, instance.primary_node,
5139
                                         instance.secondary_nodes,
5140
                                         [disk_dict],
5141
                                         file_path,
5142
                                         file_driver,
5143
                                         disk_idx_base)[0]
5144
        new_disk.mode = disk_dict['mode']
5145
        instance.disks.append(new_disk)
5146
        info = _GetInstanceInfoText(instance)
5147

    
5148
        logging.info("Creating volume %s for instance %s",
5149
                     new_disk.iv_name, instance.name)
5150
        # Note: this needs to be kept in sync with _CreateDisks
5151
        #HARDCODE
5152
        for secondary_node in instance.secondary_nodes:
5153
          if not _CreateBlockDevOnSecondary(self, secondary_node, instance,
5154
                                            new_disk, False, info):
5155
            self.LogWarning("Failed to create volume %s (%s) on"
5156
                            " secondary node %s!",
5157
                            new_disk.iv_name, new_disk, secondary_node)
5158
        #HARDCODE
5159
        if not _CreateBlockDevOnPrimary(self, instance.primary_node,
5160
                                        instance, new_disk, info):
5161
          self.LogWarning("Failed to create volume %s on primary!",
5162
                          new_disk.iv_name)
5163
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
5164
                       (new_disk.size, new_disk.mode)))
5165
      else:
5166
        # change a given disk
5167
        instance.disks[disk_op].mode = disk_dict['mode']
5168
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
5169
    # NIC changes
5170
    for nic_op, nic_dict in self.op.nics:
5171
      if nic_op == constants.DDM_REMOVE:
5172
        # remove the last nic
5173
        del instance.nics[-1]
5174
        result.append(("nic.%d" % len(instance.nics), "remove"))
5175
      elif nic_op == constants.DDM_ADD:
5176
        # add a new nic
5177
        if 'mac' not in nic_dict:
5178
          mac = constants.VALUE_GENERATE
5179
        else:
5180
          mac = nic_dict['mac']
5181
        if mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5182
          mac = self.cfg.GenerateMAC()
5183
        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
5184
                              bridge=nic_dict.get('bridge', None))
5185
        instance.nics.append(new_nic)
5186
        result.append(("nic.%d" % (len(instance.nics) - 1),
5187
                       "add:mac=%s,ip=%s,bridge=%s" %
5188
                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
5189
      else:
5190
        # change a given nic
5191
        for key in 'mac', 'ip', 'bridge':
5192
          if key in nic_dict:
5193
            setattr(instance.nics[nic_op], key, nic_dict[key])
5194
            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))
5195

    
5196
    # hvparams changes
5197
    if self.op.hvparams:
5198
      instance.hvparams = self.hv_new
5199
      for key, val in self.op.hvparams.iteritems():
5200
        result.append(("hv/%s" % key, val))
5201

    
5202
    # beparams changes
5203
    if self.op.beparams:
5204
      instance.beparams = self.be_inst
5205
      for key, val in self.op.beparams.iteritems():
5206
        result.append(("be/%s" % key, val))
5207

    
5208
    self.cfg.Update(instance)
5209

    
5210
    return result
5211

    
5212

    
5213
class LUQueryExports(NoHooksLU):
5214
  """Query the exports list
5215

5216
  """
5217
  _OP_REQP = ['nodes']
5218
  REQ_BGL = False
5219

    
5220
  def ExpandNames(self):
5221
    self.needed_locks = {}
5222
    self.share_locks[locking.LEVEL_NODE] = 1
5223
    if not self.op.nodes:
5224
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5225
    else:
5226
      self.needed_locks[locking.LEVEL_NODE] = \
5227
        _GetWantedNodes(self, self.op.nodes)
5228

    
5229
  def CheckPrereq(self):
5230
    """Check prerequisites.
5231

5232
    """
5233
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
5234

    
5235
  def Exec(self, feedback_fn):
5236
    """Compute the list of all the exported system images.
5237

5238
    @rtype: dict
5239
    @return: a dictionary with the structure node->(export-list)
5240
        where export-list is a list of the instances exported on
5241
        that node.
5242

5243
    """
5244
    return self.rpc.call_export_list(self.nodes)
5245

    
5246

    
5247
class LUExportInstance(LogicalUnit):
5248
  """Export an instance to an image in the cluster.
5249

5250
  """
5251
  HPATH = "instance-export"
5252
  HTYPE = constants.HTYPE_INSTANCE
5253
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
5254
  REQ_BGL = False
5255

    
5256
  def ExpandNames(self):
5257
    self._ExpandAndLockInstance()
5258
    # FIXME: lock only instance primary and destination node
5259
    #
5260
    # Sad but true, for now we have to lock all nodes, as we don't know where
5261
    # the previous export might be, and in this LU we search for it and
5262
    # remove it from its current node. In the future we could fix this by:
5263
    #  - making a tasklet to search (share-lock all), then create the new one,
5264
    #    then one to remove, after
5265
    #  - removing the removal operation altogether
5266
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5267

    
5268
  def DeclareLocks(self, level):
5269
    """Last minute lock declaration."""
5270
    # All nodes are locked anyway, so nothing to do here.
5271

    
5272
  def BuildHooksEnv(self):
5273
    """Build hooks env.
5274

5275
    This will run on the master, primary node and target node.
5276

5277
    """
5278
    env = {
5279
      "EXPORT_NODE": self.op.target_node,
5280
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
5281
      }
5282
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5283
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
5284
          self.op.target_node]
5285
    return env, nl, nl
5286

    
5287
  def CheckPrereq(self):
5288
    """Check prerequisites.
5289

5290
    This checks that the instance and node names are valid.
5291

5292
    """
5293
    instance_name = self.op.instance_name
5294
    self.instance = self.cfg.GetInstanceInfo(instance_name)
5295
    assert self.instance is not None, \
5296
          "Cannot retrieve locked instance %s" % self.op.instance_name
5297

    
5298
    self.dst_node = self.cfg.GetNodeInfo(
5299
      self.cfg.ExpandNodeName(self.op.target_node))
5300

    
5301
    if self.dst_node is None:
5302
      # This is a wrong node name, not a non-locked node
5303
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
5304

    
5305
    # instance disk type verification
5306
    for disk in self.instance.disks:
5307
      if disk.dev_type == constants.LD_FILE:
5308
        raise errors.OpPrereqError("Export not supported for instances with"
5309
                                   " file-based disks")
5310

    
5311
  def Exec(self, feedback_fn):
5312
    """Export an instance to an image in the cluster.
5313

5314
    """
5315
    instance = self.instance
5316
    dst_node = self.dst_node
5317
    src_node = instance.primary_node
5318
    if self.op.shutdown:
5319
      # shutdown the instance, but not the disks
5320
      if not self.rpc.call_instance_shutdown(src_node, instance):
5321
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
5322
                                 (instance.name, src_node))
5323

    
5324
    vgname = self.cfg.GetVGName()
5325

    
5326
    snap_disks = []
5327

    
5328
    try:
5329
      for disk in instance.disks:
5330
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
5331
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
5332

    
5333
        if not new_dev_name:
5334
          self.LogWarning("Could not snapshot block device %s on node %s",
5335
                          disk.logical_id[1], src_node)
5336
          snap_disks.append(False)
5337
        else:
5338
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
5339
                                 logical_id=(vgname, new_dev_name),
5340
                                 physical_id=(vgname, new_dev_name),
5341
                                 iv_name=disk.iv_name)
5342
          snap_disks.append(new_dev)
5343

    
5344
    finally:
5345
      if self.op.shutdown and instance.status == "up":
5346
        if not self.rpc.call_instance_start(src_node, instance, None):
5347
          _ShutdownInstanceDisks(self, instance)
5348
          raise errors.OpExecError("Could not start instance")
5349

    
5350
    # TODO: check for size
5351

    
5352
    cluster_name = self.cfg.GetClusterName()
5353
    for idx, dev in enumerate(snap_disks):
5354
      if dev:
5355
        if not self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
5356
                                             instance, cluster_name, idx):
5357
          self.LogWarning("Could not export block device %s from node %s to"
5358
                          " node %s", dev.logical_id[1], src_node,
5359
                          dst_node.name)
5360
        if not self.rpc.call_blockdev_remove(src_node, dev):
5361
          self.LogWarning("Could not remove snapshot block device %s from node"
5362
                          " %s", dev.logical_id[1], src_node)
5363

    
5364
    if not self.rpc.call_finalize_export(dst_node.name, instance, snap_disks):
5365
      self.LogWarning("Could not finalize export for instance %s on node %s",
5366
                      instance.name, dst_node.name)
5367

    
5368
    nodelist = self.cfg.GetNodeList()
5369
    nodelist.remove(dst_node.name)
5370

    
5371
    # on one-node clusters nodelist will be empty after the removal
5372
    # if we proceed the backup would be removed because OpQueryExports
5373
    # substitutes an empty list with the full cluster node list.
5374
    if nodelist:
5375
      exportlist = self.rpc.call_export_list(nodelist)
5376
      for node in exportlist:
5377
        if instance.name in exportlist[node]:
5378
          if not self.rpc.call_export_remove(node, instance.name):
5379
            self.LogWarning("Could not remove older export for instance %s"
5380
                            " on node %s", instance.name, node)
5381

    
5382

    
5383
class LURemoveExport(NoHooksLU):
5384
  """Remove exports related to the named instance.
5385

5386
  """
5387
  _OP_REQP = ["instance_name"]
5388
  REQ_BGL = False
5389

    
5390
  def ExpandNames(self):
5391
    self.needed_locks = {}
5392
    # We need all nodes to be locked in order for RemoveExport to work, but we
5393
    # don't need to lock the instance itself, as nothing will happen to it (and
5394
    # we can remove exports also for a removed instance)
5395
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5396

    
5397
  def CheckPrereq(self):
5398
    """Check prerequisites.
5399
    """
5400
    pass
5401

    
5402
  def Exec(self, feedback_fn):
5403
    """Remove any export.
5404

5405
    """
5406
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
5407
    # If the instance was not found we'll try with the name that was passed in.
5408
    # This will only work if it was an FQDN, though.
5409
    fqdn_warn = False
5410
    if not instance_name:
5411
      fqdn_warn = True
5412
      instance_name = self.op.instance_name
5413

    
5414
    exportlist = self.rpc.call_export_list(self.acquired_locks[
5415
      locking.LEVEL_NODE])
5416
    found = False
5417
    for node in exportlist:
5418
      if instance_name in exportlist[node]:
5419
        found = True
5420
        if not self.rpc.call_export_remove(node, instance_name):
5421
          logging.error("Could not remove export for instance %s"
5422
                        " on node %s", instance_name, node)
5423

    
5424
    if fqdn_warn and not found:
5425
      feedback_fn("Export not found. If trying to remove an export belonging"
5426
                  " to a deleted instance please use its Fully Qualified"
5427
                  " Domain Name.")
5428

    
5429

    
5430
class TagsLU(NoHooksLU):
5431
  """Generic tags LU.
5432

5433
  This is an abstract class which is the parent of all the other tags LUs.
5434

5435
  """
5436

    
5437
  def ExpandNames(self):
5438
    self.needed_locks = {}
5439
    if self.op.kind == constants.TAG_NODE:
5440
      name = self.cfg.ExpandNodeName(self.op.name)
5441
      if name is None:
5442
        raise errors.OpPrereqError("Invalid node name (%s)" %
5443
                                   (self.op.name,))
5444
      self.op.name = name
5445
      self.needed_locks[locking.LEVEL_NODE] = name
5446
    elif self.op.kind == constants.TAG_INSTANCE:
5447
      name = self.cfg.ExpandInstanceName(self.op.name)
5448
      if name is None:
5449
        raise errors.OpPrereqError("Invalid instance name (%s)" %
5450
                                   (self.op.name,))
5451
      self.op.name = name
5452
      self.needed_locks[locking.LEVEL_INSTANCE] = name
5453

    
5454
  def CheckPrereq(self):
5455
    """Check prerequisites.
5456

5457
    """
5458
    if self.op.kind == constants.TAG_CLUSTER:
5459
      self.target = self.cfg.GetClusterInfo()
5460
    elif self.op.kind == constants.TAG_NODE:
5461
      self.target = self.cfg.GetNodeInfo(self.op.name)
5462
    elif self.op.kind == constants.TAG_INSTANCE:
5463
      self.target = self.cfg.GetInstanceInfo(self.op.name)
5464
    else:
5465
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
5466
                                 str(self.op.kind))
5467

    
5468

    
5469
class LUGetTags(TagsLU):
5470
  """Returns the tags of a given object.
5471

5472
  """
5473
  _OP_REQP = ["kind", "name"]
5474
  REQ_BGL = False
5475

    
5476
  def Exec(self, feedback_fn):
5477
    """Returns the tag list.
5478

5479
    """
5480
    return list(self.target.GetTags())
5481

    
5482

    
5483
class LUSearchTags(NoHooksLU):
5484
  """Searches the tags for a given pattern.
5485

5486
  """
5487
  _OP_REQP = ["pattern"]
5488
  REQ_BGL = False
5489

    
5490
  def ExpandNames(self):
5491
    self.needed_locks = {}
5492

    
5493
  def CheckPrereq(self):
5494
    """Check prerequisites.
5495

5496
    This checks the pattern passed for validity by compiling it.
5497

5498
    """
5499
    try:
5500
      self.re = re.compile(self.op.pattern)
5501
    except re.error, err:
5502
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
5503
                                 (self.op.pattern, err))
5504

    
5505
  def Exec(self, feedback_fn):
5506
    """Returns the tag list.
5507

5508
    """
5509
    cfg = self.cfg
5510
    tgts = [("/cluster", cfg.GetClusterInfo())]
5511
    ilist = cfg.GetAllInstancesInfo().values()
5512
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
5513
    nlist = cfg.GetAllNodesInfo().values()
5514
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
5515
    results = []
5516
    for path, target in tgts:
5517
      for tag in target.GetTags():
5518
        if self.re.search(tag):
5519
          results.append((path, tag))
5520
    return results
5521

    
5522

    
5523
class LUAddTags(TagsLU):
5524
  """Sets a tag on a given object.
5525

5526
  """
5527
  _OP_REQP = ["kind", "name", "tags"]
5528
  REQ_BGL = False
5529

    
5530
  def CheckPrereq(self):
5531
    """Check prerequisites.
5532

5533
    This checks the type and length of the tag name and value.
5534

5535
    """
5536
    TagsLU.CheckPrereq(self)
5537
    for tag in self.op.tags:
5538
      objects.TaggableObject.ValidateTag(tag)
5539

    
5540
  def Exec(self, feedback_fn):
5541
    """Sets the tag.
5542

5543
    """
5544
    try:
5545
      for tag in self.op.tags:
5546
        self.target.AddTag(tag)
5547
    except errors.TagError, err:
5548
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
5549
    try:
5550
      self.cfg.Update(self.target)
5551
    except errors.ConfigurationError:
5552
      raise errors.OpRetryError("There has been a modification to the"
5553
                                " config file and the operation has been"
5554
                                " aborted. Please retry.")
5555

    
5556

    
5557
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    if not del_tags <= cur_tags:
      diff_tags = del_tags - cur_tags
      diff_names = ["'%s'" % tag for tag in diff_tags]
      diff_names.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")


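# Illustrative note on LUDelTags.CheckPrereq: the frozenset comparison
# rejects partial deletions.  With made-up values, if
# del_tags = frozenset(["a", "b"]) and cur_tags = set(["a"]), then
# del_tags <= cur_tags is False, diff_tags is frozenset(["b"]) and the
# OpPrereqError message reports the missing tag as 'b'.
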
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not result:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, node_result in result.items():
        if not node_result:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, node_result))


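# Illustrative note on LUTestDelay: the corresponding opcode carries the
# three attributes listed in _OP_REQP, so (assuming the opcodes.OpTestDelay
# definition) an invocation could look roughly like:
#
#   op = opcodes.OpTestDelay(duration=10.0, on_master=True,
#                            on_nodes=["node1.example.com"])
#
# The duration and node name above are examples only.
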
class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has four sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  _RELO_KEYS = [
    "relocate_from",
    ]

  def __init__(self, lu, mode, name, **kwargs):
    self.lu = lu
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()

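  # Illustrative note: as can be seen in LUTestAllocator.Exec below, an
  # allocation request passes every _ALLO_KEYS member as a keyword
  # argument, e.g. (with made-up values):
  #
  #   ial = IAllocator(lu, mode=constants.IALLOCATOR_MODE_ALLOC,
  #                    name="inst1.example.com", mem_size=512,
  #                    disks=[{"size": 1024, "mode": "w"}] * 2,
  #                    disk_template=constants.DT_DRBD8, os="...",
  #                    tags=[], nics=[], vcpus=1, hypervisor=...)
  #
  # while a relocation request only passes relocate_from (_RELO_KEYS).
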
  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.lu.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": 1,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enable_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor = cfg.GetInstanceInfo(self.name).hypervisor

    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
                                           hypervisor)
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
                       cluster_info.enabled_hypervisors)
    for nname in node_list:
      ninfo = cfg.GetNodeInfo(nname)
      if nname not in node_data or not isinstance(node_data[nname], dict):
        raise errors.OpExecError("Can't get data for node %s" % nname)
      remote_info = node_data[nname]
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
                   'vg_size', 'vg_free', 'cpu_total']:
        if attr not in remote_info:
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
                                   (nname, attr))
        try:
          remote_info[attr] = int(remote_info[attr])
        except ValueError, err:
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
                                   " %s" % (nname, attr, str(err)))
      # compute memory used by primary instances
      i_p_mem = i_p_up_mem = 0
      for iinfo, beinfo in i_list:
        if iinfo.primary_node == nname:
          i_p_mem += beinfo[constants.BE_MEMORY]
          if iinfo.name not in node_iinfo[nname]:
            i_used_mem = 0
          else:
            i_used_mem = int(node_iinfo[nname][iinfo.name]['memory'])
          i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
          remote_info['memory_free'] -= max(0, i_mem_diff)

          if iinfo.status == "up":
            i_p_up_mem += beinfo[constants.BE_MEMORY]

      # compute memory used by instances
      pnr = {
        "tags": list(ninfo.GetTags()),
        "total_memory": remote_info['memory_total'],
        "reserved_memory": remote_info['memory_dom0'],
        "free_memory": remote_info['memory_free'],
        "i_pri_memory": i_p_mem,
        "i_pri_up_memory": i_p_up_mem,
        "total_disk": remote_info['vg_size'],
        "free_disk": remote_info['vg_free'],
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "total_cpus": remote_info['cpu_total'],
        }
      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
                  for n in iinfo.nics]
      pir = {
        "tags": list(iinfo.GetTags()),
        "should_run": iinfo.status == "up",
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

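  # The dict built above has, schematically, the following shape (host
  # names and numbers are purely illustrative, elided keys marked "..."):
  #
  #   {"version": 1,
  #    "cluster_name": "cluster.example.com",
  #    "cluster_tags": [],
  #    "enable_hypervisors": [...],
  #    "nodes": {
  #      "node1.example.com": {"total_memory": 4096, "free_memory": 2048,
  #                            "reserved_memory": 512, "total_disk": 102400,
  #                            "free_disk": 51200, "total_cpus": 4, ...},
  #      },
  #    "instances": {
  #      "inst1.example.com": {"memory": 512, "vcpus": 1, "os": "...",
  #                            "nics": [...], "disks": [...], ...},
  #      },
  #    }
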
  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data
    if len(self.disks) != 2:
      raise errors.OpExecError("Only two-disk configurations supported")

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

  def _BuildInputData(self):
    """Build input data structures.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner
    data = self.in_text

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)

    if not isinstance(result, (list, tuple)) or len(result) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict


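# Illustrative note on IAllocator._ValidateResult: a minimal response that
# passes the validation above would look like this once deserialized (node
# names are made up):
#
#   {"success": True,
#    "info": "allocation successful",
#    "nodes": ["node2.example.com", "node3.example.com"]}
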
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      if len(self.op.disks) != 2:
        raise errors.OpPrereqError("Only two-disk configurations supported")
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      if self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result
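
# Illustrative note on LUTestAllocator: with direction IALLOCATOR_DIR_IN the
# LU only returns the generated input text (ial.in_text), which is useful
# for inspecting what an external allocator script would receive; with
# IALLOCATOR_DIR_OUT it actually runs the allocator named in
# self.op.allocator and returns its raw, unvalidated output.  Assuming the
# opcodes.OpTestAllocator definition, a relocation dry-run could look
# roughly like (names are examples only):
#
#   op = opcodes.OpTestAllocator(direction=constants.IALLOCATOR_DIR_IN,
#                                mode=constants.IALLOCATOR_MODE_RELOC,
#                                name="inst1.example.com", allocator=None)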