Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 5e3d3eb3

History | View | Annotate | Download (250.6 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module implementing the master-side code."""
23

    
24
# pylint: disable-msg=W0613,W0201
25

    
26
import os
27
import os.path
28
import time
29
import tempfile
30
import re
31
import platform
32
import logging
33
import copy
34
import random
35

    
36
from ganeti import ssh
37
from ganeti import utils
38
from ganeti import errors
39
from ganeti import hypervisor
40
from ganeti import locking
41
from ganeti import constants
42
from ganeti import objects
43
from ganeti import opcodes
44
from ganeti import serializer
45
from ganeti import ssconf
46

    
47

    
48
class LogicalUnit(object):
49
  """Logical Unit base class.
50

51
  Subclasses must follow these rules:
52
    - implement ExpandNames
53
    - implement CheckPrereq
54
    - implement Exec
55
    - implement BuildHooksEnv
56
    - redefine HPATH and HTYPE
57
    - optionally redefine their run requirements:
58
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
59

60
  Note that all commands require root permissions.
61

62
  """
63
  HPATH = None
64
  HTYPE = None
65
  _OP_REQP = []
66
  REQ_BGL = True
67

    
68
  def __init__(self, processor, op, context, rpc):
69
    """Constructor for LogicalUnit.
70

71
    This needs to be overriden in derived classes in order to check op
72
    validity.
73

74
    """
75
    self.proc = processor
76
    self.op = op
77
    self.cfg = context.cfg
78
    self.context = context
79
    self.rpc = rpc
80
    # Dicts used to declare locking needs to mcpu
81
    self.needed_locks = None
82
    self.acquired_locks = {}
83
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
84
    self.add_locks = {}
85
    self.remove_locks = {}
86
    # Used to force good behavior when calling helper functions
87
    self.recalculate_locks = {}
88
    self.__ssh = None
89
    # logging
90
    self.LogWarning = processor.LogWarning
91
    self.LogInfo = processor.LogInfo
92

    
93
    for attr_name in self._OP_REQP:
94
      attr_val = getattr(op, attr_name, None)
95
      if attr_val is None:
96
        raise errors.OpPrereqError("Required parameter '%s' missing" %
97
                                   attr_name)
98
    self.CheckArguments()
99

    
100
  def __GetSSH(self):
101
    """Returns the SshRunner object
102

103
    """
104
    if not self.__ssh:
105
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
106
    return self.__ssh
107

    
108
  ssh = property(fget=__GetSSH)
109

    
110
  def CheckArguments(self):
111
    """Check syntactic validity for the opcode arguments.
112

113
    This method is for doing a simple syntactic check and ensure
114
    validity of opcode parameters, without any cluster-related
115
    checks. While the same can be accomplished in ExpandNames and/or
116
    CheckPrereq, doing these separate is better because:
117

118
      - ExpandNames is left as as purely a lock-related function
119
      - CheckPrereq is run after we have aquired locks (and possible
120
        waited for them)
121

122
    The function is allowed to change the self.op attribute so that
123
    later methods can no longer worry about missing parameters.
124

125
    """
126
    pass
127

    
128
  def ExpandNames(self):
129
    """Expand names for this LU.
130

131
    This method is called before starting to execute the opcode, and it should
132
    update all the parameters of the opcode to their canonical form (e.g. a
133
    short node name must be fully expanded after this method has successfully
134
    completed). This way locking, hooks, logging, ecc. can work correctly.
135

136
    LUs which implement this method must also populate the self.needed_locks
137
    member, as a dict with lock levels as keys, and a list of needed lock names
138
    as values. Rules:
139

140
      - use an empty dict if you don't need any lock
141
      - if you don't need any lock at a particular level omit that level
142
      - don't put anything for the BGL level
143
      - if you want all locks at a level use locking.ALL_SET as a value
144

145
    If you need to share locks (rather than acquire them exclusively) at one
146
    level you can modify self.share_locks, setting a true value (usually 1) for
147
    that level. By default locks are not shared.
148

149
    Examples::
150

151
      # Acquire all nodes and one instance
152
      self.needed_locks = {
153
        locking.LEVEL_NODE: locking.ALL_SET,
154
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
155
      }
156
      # Acquire just two nodes
157
      self.needed_locks = {
158
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
159
      }
160
      # Acquire no locks
161
      self.needed_locks = {} # No, you can't leave it to the default value None
162

163
    """
164
    # The implementation of this method is mandatory only if the new LU is
165
    # concurrent, so that old LUs don't need to be changed all at the same
166
    # time.
167
    if self.REQ_BGL:
168
      self.needed_locks = {} # Exclusive LUs don't need locks.
169
    else:
170
      raise NotImplementedError
171

    
172
  def DeclareLocks(self, level):
173
    """Declare LU locking needs for a level
174

175
    While most LUs can just declare their locking needs at ExpandNames time,
176
    sometimes there's the need to calculate some locks after having acquired
177
    the ones before. This function is called just before acquiring locks at a
178
    particular level, but after acquiring the ones at lower levels, and permits
179
    such calculations. It can be used to modify self.needed_locks, and by
180
    default it does nothing.
181

182
    This function is only called if you have something already set in
183
    self.needed_locks for the level.
184

185
    @param level: Locking level which is going to be locked
186
    @type level: member of ganeti.locking.LEVELS
187

188
    """
189

    
190
  def CheckPrereq(self):
191
    """Check prerequisites for this LU.
192

193
    This method should check that the prerequisites for the execution
194
    of this LU are fulfilled. It can do internode communication, but
195
    it should be idempotent - no cluster or system changes are
196
    allowed.
197

198
    The method should raise errors.OpPrereqError in case something is
199
    not fulfilled. Its return value is ignored.
200

201
    This method should also update all the parameters of the opcode to
202
    their canonical form if it hasn't been done by ExpandNames before.
203

204
    """
205
    raise NotImplementedError
206

    
207
  def Exec(self, feedback_fn):
208
    """Execute the LU.
209

210
    This method should implement the actual work. It should raise
211
    errors.OpExecError for failures that are somewhat dealt with in
212
    code, or expected.
213

214
    """
215
    raise NotImplementedError
216

    
217
  def BuildHooksEnv(self):
218
    """Build hooks environment for this LU.
219

220
    This method should return a three-node tuple consisting of: a dict
221
    containing the environment that will be used for running the
222
    specific hook for this LU, a list of node names on which the hook
223
    should run before the execution, and a list of node names on which
224
    the hook should run after the execution.
225

226
    The keys of the dict must not have 'GANETI_' prefixed as this will
227
    be handled in the hooks runner. Also note additional keys will be
228
    added by the hooks runner. If the LU doesn't define any
229
    environment, an empty dict (and not None) should be returned.
230

231
    No nodes should be returned as an empty list (and not None).
232

233
    Note that if the HPATH for a LU class is None, this function will
234
    not be called.
235

236
    """
237
    raise NotImplementedError
238

    
239
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
240
    """Notify the LU about the results of its hooks.
241

242
    This method is called every time a hooks phase is executed, and notifies
243
    the Logical Unit about the hooks' result. The LU can then use it to alter
244
    its result based on the hooks.  By default the method does nothing and the
245
    previous result is passed back unchanged but any LU can define it if it
246
    wants to use the local cluster hook-scripts somehow.
247

248
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
249
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
250
    @param hook_results: the results of the multi-node hooks rpc call
251
    @param feedback_fn: function used send feedback back to the caller
252
    @param lu_result: the previous Exec result this LU had, or None
253
        in the PRE phase
254
    @return: the new Exec result, based on the previous result
255
        and hook results
256

257
    """
258
    return lu_result
259

    
260
  def _ExpandAndLockInstance(self):
261
    """Helper function to expand and lock an instance.
262

263
    Many LUs that work on an instance take its name in self.op.instance_name
264
    and need to expand it and then declare the expanded name for locking. This
265
    function does it, and then updates self.op.instance_name to the expanded
266
    name. It also initializes needed_locks as a dict, if this hasn't been done
267
    before.
268

269
    """
270
    if self.needed_locks is None:
271
      self.needed_locks = {}
272
    else:
273
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
274
        "_ExpandAndLockInstance called with instance-level locks set"
275
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
276
    if expanded_name is None:
277
      raise errors.OpPrereqError("Instance '%s' not known" %
278
                                  self.op.instance_name)
279
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
280
    self.op.instance_name = expanded_name
281

    
282
  def _LockInstancesNodes(self, primary_only=False):
283
    """Helper function to declare instances' nodes for locking.
284

285
    This function should be called after locking one or more instances to lock
286
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
287
    with all primary or secondary nodes for instances already locked and
288
    present in self.needed_locks[locking.LEVEL_INSTANCE].
289

290
    It should be called from DeclareLocks, and for safety only works if
291
    self.recalculate_locks[locking.LEVEL_NODE] is set.
292

293
    In the future it may grow parameters to just lock some instance's nodes, or
294
    to just lock primaries or secondary nodes, if needed.
295

296
    If should be called in DeclareLocks in a way similar to::
297

298
      if level == locking.LEVEL_NODE:
299
        self._LockInstancesNodes()
300

301
    @type primary_only: boolean
302
    @param primary_only: only lock primary nodes of locked instances
303

304
    """
305
    assert locking.LEVEL_NODE in self.recalculate_locks, \
306
      "_LockInstancesNodes helper function called with no nodes to recalculate"
307

    
308
    # TODO: check if we're really been called with the instance locks held
309

    
310
    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
311
    # future we might want to have different behaviors depending on the value
312
    # of self.recalculate_locks[locking.LEVEL_NODE]
313
    wanted_nodes = []
314
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
315
      instance = self.context.cfg.GetInstanceInfo(instance_name)
316
      wanted_nodes.append(instance.primary_node)
317
      if not primary_only:
318
        wanted_nodes.extend(instance.secondary_nodes)
319

    
320
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
321
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
322
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
323
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
324

    
325
    del self.recalculate_locks[locking.LEVEL_NODE]
326

    
327

    
328
class NoHooksLU(LogicalUnit):
329
  """Simple LU which runs no hooks.
330

331
  This LU is intended as a parent for other LogicalUnits which will
332
  run no hooks, in order to reduce duplicate code.
333

334
  """
335
  HPATH = None
336
  HTYPE = None
337

    
338

    
339
def _GetWantedNodes(lu, nodes):
340
  """Returns list of checked and expanded node names.
341

342
  @type lu: L{LogicalUnit}
343
  @param lu: the logical unit on whose behalf we execute
344
  @type nodes: list
345
  @param nodes: list of node names or None for all nodes
346
  @rtype: list
347
  @return: the list of nodes, sorted
348
  @raise errors.OpProgrammerError: if the nodes parameter is wrong type
349

350
  """
351
  if not isinstance(nodes, list):
352
    raise errors.OpPrereqError("Invalid argument type 'nodes'")
353

    
354
  if not nodes:
355
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
356
      " non-empty list of nodes whose name is to be expanded.")
357

    
358
  wanted = []
359
  for name in nodes:
360
    node = lu.cfg.ExpandNodeName(name)
361
    if node is None:
362
      raise errors.OpPrereqError("No such node name '%s'" % name)
363
    wanted.append(node)
364

    
365
  return utils.NiceSort(wanted)
366

    
367

    
368
def _GetWantedInstances(lu, instances):
369
  """Returns list of checked and expanded instance names.
370

371
  @type lu: L{LogicalUnit}
372
  @param lu: the logical unit on whose behalf we execute
373
  @type instances: list
374
  @param instances: list of instance names or None for all instances
375
  @rtype: list
376
  @return: the list of instances, sorted
377
  @raise errors.OpPrereqError: if the instances parameter is wrong type
378
  @raise errors.OpPrereqError: if any of the passed instances is not found
379

380
  """
381
  if not isinstance(instances, list):
382
    raise errors.OpPrereqError("Invalid argument type 'instances'")
383

    
384
  if instances:
385
    wanted = []
386

    
387
    for name in instances:
388
      instance = lu.cfg.ExpandInstanceName(name)
389
      if instance is None:
390
        raise errors.OpPrereqError("No such instance name '%s'" % name)
391
      wanted.append(instance)
392

    
393
  else:
394
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
395
  return wanted
396

    
397

    
398
def _CheckOutputFields(static, dynamic, selected):
399
  """Checks whether all selected fields are valid.
400

401
  @type static: L{utils.FieldSet}
402
  @param static: static fields set
403
  @type dynamic: L{utils.FieldSet}
404
  @param dynamic: dynamic fields set
405

406
  """
407
  f = utils.FieldSet()
408
  f.Extend(static)
409
  f.Extend(dynamic)
410

    
411
  delta = f.NonMatching(selected)
412
  if delta:
413
    raise errors.OpPrereqError("Unknown output fields selected: %s"
414
                               % ",".join(delta))
415

    
416

    
417
def _CheckBooleanOpField(op, name):
418
  """Validates boolean opcode parameters.
419

420
  This will ensure that an opcode parameter is either a boolean value,
421
  or None (but that it always exists).
422

423
  """
424
  val = getattr(op, name, None)
425
  if not (val is None or isinstance(val, bool)):
426
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
427
                               (name, str(val)))
428
  setattr(op, name, val)
429

    
430

    
431
def _CheckNodeOnline(lu, node):
432
  """Ensure that a given node is online.
433

434
  @param lu: the LU on behalf of which we make the check
435
  @param node: the node to check
436
  @raise errors.OpPrereqError: if the node is offline
437

438
  """
439
  if lu.cfg.GetNodeInfo(node).offline:
440
    raise errors.OpPrereqError("Can't use offline node %s" % node)
441

    
442

    
443
def _CheckNodeNotDrained(lu, node):
444
  """Ensure that a given node is not drained.
445

446
  @param lu: the LU on behalf of which we make the check
447
  @param node: the node to check
448
  @raise errors.OpPrereqError: if the node is drained
449

450
  """
451
  if lu.cfg.GetNodeInfo(node).drained:
452
    raise errors.OpPrereqError("Can't use drained node %s" % node)
453

    
454

    
455
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
456
                          memory, vcpus, nics, disk_template, disks,
457
                          bep, hvp, hypervisor):
458
  """Builds instance related env variables for hooks
459

460
  This builds the hook environment from individual variables.
461

462
  @type name: string
463
  @param name: the name of the instance
464
  @type primary_node: string
465
  @param primary_node: the name of the instance's primary node
466
  @type secondary_nodes: list
467
  @param secondary_nodes: list of secondary nodes as strings
468
  @type os_type: string
469
  @param os_type: the name of the instance's OS
470
  @type status: boolean
471
  @param status: the should_run status of the instance
472
  @type memory: string
473
  @param memory: the memory size of the instance
474
  @type vcpus: string
475
  @param vcpus: the count of VCPUs the instance has
476
  @type nics: list
477
  @param nics: list of tuples (ip, mac, mode, link) representing
478
      the NICs the instance has
479
  @type disk_template: string
480
  @param disk_template: the distk template of the instance
481
  @type disks: list
482
  @param disks: the list of (size, mode) pairs
483
  @type bep: dict
484
  @param bep: the backend parameters for the instance
485
  @type hvp: dict
486
  @param hvp: the hypervisor parameters for the instance
487
  @type hypervisor: string
488
  @param hypervisor: the hypervisor for the instance
489
  @rtype: dict
490
  @return: the hook environment for this instance
491

492
  """
493
  if status:
494
    str_status = "up"
495
  else:
496
    str_status = "down"
497
  env = {
498
    "OP_TARGET": name,
499
    "INSTANCE_NAME": name,
500
    "INSTANCE_PRIMARY": primary_node,
501
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
502
    "INSTANCE_OS_TYPE": os_type,
503
    "INSTANCE_STATUS": str_status,
504
    "INSTANCE_MEMORY": memory,
505
    "INSTANCE_VCPUS": vcpus,
506
    "INSTANCE_DISK_TEMPLATE": disk_template,
507
    "INSTANCE_HYPERVISOR": hypervisor,
508
  }
509

    
510
  if nics:
511
    nic_count = len(nics)
512
    for idx, (ip, mac, mode, link) in enumerate(nics):
513
      if ip is None:
514
        ip = ""
515
      env["INSTANCE_NIC%d_IP" % idx] = ip
516
      env["INSTANCE_NIC%d_MAC" % idx] = mac
517
      env["INSTANCE_NIC%d_MODE" % idx] = mode
518
      env["INSTANCE_NIC%d_LINK" % idx] = link
519
      if mode == constants.NIC_MODE_BRIDGED:
520
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
521
  else:
522
    nic_count = 0
523

    
524
  env["INSTANCE_NIC_COUNT"] = nic_count
525

    
526
  if disks:
527
    disk_count = len(disks)
528
    for idx, (size, mode) in enumerate(disks):
529
      env["INSTANCE_DISK%d_SIZE" % idx] = size
530
      env["INSTANCE_DISK%d_MODE" % idx] = mode
531
  else:
532
    disk_count = 0
533

    
534
  env["INSTANCE_DISK_COUNT"] = disk_count
535

    
536
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
537
    for key, value in source.items():
538
      env["INSTANCE_%s_%s" % (kind, key)] = value
539

    
540
  return env
541

    
542
def _NICListToTuple(lu, nics):
543
  """Build a list of nic information tuples.
544

545
  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
546
  value in LUQueryInstanceData.
547

548
  @type lu:  L{LogicalUnit}
549
  @param lu: the logical unit on whose behalf we execute
550
  @type nics: list of L{objects.NIC}
551
  @param nics: list of nics to convert to hooks tuples
552

553
  """
554
  hooks_nics = []
555
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
556
  for nic in nics:
557
    ip = nic.ip
558
    mac = nic.mac
559
    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
560
    mode = filled_params[constants.NIC_MODE]
561
    link = filled_params[constants.NIC_LINK]
562
    hooks_nics.append((ip, mac, mode, link))
563
  return hooks_nics
564

    
565
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
566
  """Builds instance related env variables for hooks from an object.
567

568
  @type lu: L{LogicalUnit}
569
  @param lu: the logical unit on whose behalf we execute
570
  @type instance: L{objects.Instance}
571
  @param instance: the instance for which we should build the
572
      environment
573
  @type override: dict
574
  @param override: dictionary with key/values that will override
575
      our values
576
  @rtype: dict
577
  @return: the hook environment dictionary
578

579
  """
580
  cluster = lu.cfg.GetClusterInfo()
581
  bep = cluster.FillBE(instance)
582
  hvp = cluster.FillHV(instance)
583
  args = {
584
    'name': instance.name,
585
    'primary_node': instance.primary_node,
586
    'secondary_nodes': instance.secondary_nodes,
587
    'os_type': instance.os,
588
    'status': instance.admin_up,
589
    'memory': bep[constants.BE_MEMORY],
590
    'vcpus': bep[constants.BE_VCPUS],
591
    'nics': _NICListToTuple(lu, instance.nics),
592
    'disk_template': instance.disk_template,
593
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
594
    'bep': bep,
595
    'hvp': hvp,
596
    'hypervisor': instance.hypervisor,
597
  }
598
  if override:
599
    args.update(override)
600
  return _BuildInstanceHookEnv(**args)
601

    
602

    
603
def _AdjustCandidatePool(lu):
604
  """Adjust the candidate pool after node operations.
605

606
  """
607
  mod_list = lu.cfg.MaintainCandidatePool()
608
  if mod_list:
609
    lu.LogInfo("Promoted nodes to master candidate role: %s",
610
               ", ".join(node.name for node in mod_list))
611
    for name in mod_list:
612
      lu.context.ReaddNode(name)
613
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
614
  if mc_now > mc_max:
615
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
616
               (mc_now, mc_max))
617

    
618

    
619
def _CheckNicsBridgesExist(lu, target_nics, target_node,
620
                               profile=constants.PP_DEFAULT):
621
  """Check that the brigdes needed by a list of nics exist.
622

623
  """
624
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
625
  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
626
                for nic in target_nics]
627
  brlist = [params[constants.NIC_LINK] for params in paramslist
628
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
629
  if brlist:
630
    result = lu.rpc.call_bridges_exist(target_node, brlist)
631
    result.Raise("Error checking bridges on destination node '%s'" %
632
                 target_node, prereq=True)
633

    
634

    
635
def _CheckInstanceBridgesExist(lu, instance, node=None):
636
  """Check that the brigdes needed by an instance exist.
637

638
  """
639
  if node is None:
640
    node=instance.primary_node
641
  _CheckNicsBridgesExist(lu, instance.nics, node)
642

    
643

    
644
class LUDestroyCluster(NoHooksLU):
645
  """Logical unit for destroying the cluster.
646

647
  """
648
  _OP_REQP = []
649

    
650
  def CheckPrereq(self):
651
    """Check prerequisites.
652

653
    This checks whether the cluster is empty.
654

655
    Any errors are signalled by raising errors.OpPrereqError.
656

657
    """
658
    master = self.cfg.GetMasterNode()
659

    
660
    nodelist = self.cfg.GetNodeList()
661
    if len(nodelist) != 1 or nodelist[0] != master:
662
      raise errors.OpPrereqError("There are still %d node(s) in"
663
                                 " this cluster." % (len(nodelist) - 1))
664
    instancelist = self.cfg.GetInstanceList()
665
    if instancelist:
666
      raise errors.OpPrereqError("There are still %d instance(s) in"
667
                                 " this cluster." % len(instancelist))
668

    
669
  def Exec(self, feedback_fn):
670
    """Destroys the cluster.
671

672
    """
673
    master = self.cfg.GetMasterNode()
674
    result = self.rpc.call_node_stop_master(master, False)
675
    result.Raise("Could not disable the master role")
676
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
677
    utils.CreateBackup(priv_key)
678
    utils.CreateBackup(pub_key)
679
    return master
680

    
681

    
682
class LUVerifyCluster(LogicalUnit):
683
  """Verifies the cluster status.
684

685
  """
686
  HPATH = "cluster-verify"
687
  HTYPE = constants.HTYPE_CLUSTER
688
  _OP_REQP = ["skip_checks"]
689
  REQ_BGL = False
690

    
691
  def ExpandNames(self):
692
    self.needed_locks = {
693
      locking.LEVEL_NODE: locking.ALL_SET,
694
      locking.LEVEL_INSTANCE: locking.ALL_SET,
695
    }
696
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
697

    
698
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
699
                  node_result, feedback_fn, master_files,
700
                  drbd_map, vg_name):
701
    """Run multiple tests against a node.
702

703
    Test list:
704

705
      - compares ganeti version
706
      - checks vg existance and size > 20G
707
      - checks config file checksum
708
      - checks ssh to other nodes
709

710
    @type nodeinfo: L{objects.Node}
711
    @param nodeinfo: the node to check
712
    @param file_list: required list of files
713
    @param local_cksum: dictionary of local files and their checksums
714
    @param node_result: the results from the node
715
    @param feedback_fn: function used to accumulate results
716
    @param master_files: list of files that only masters should have
717
    @param drbd_map: the useddrbd minors for this node, in
718
        form of minor: (instance, must_exist) which correspond to instances
719
        and their running status
720
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())
721

722
    """
723
    node = nodeinfo.name
724

    
725
    # main result, node_result should be a non-empty dict
726
    if not node_result or not isinstance(node_result, dict):
727
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
728
      return True
729

    
730
    # compares ganeti version
731
    local_version = constants.PROTOCOL_VERSION
732
    remote_version = node_result.get('version', None)
733
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
734
            len(remote_version) == 2):
735
      feedback_fn("  - ERROR: connection to %s failed" % (node))
736
      return True
737

    
738
    if local_version != remote_version[0]:
739
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
740
                  " node %s %s" % (local_version, node, remote_version[0]))
741
      return True
742

    
743
    # node seems compatible, we can actually try to look into its results
744

    
745
    bad = False
746

    
747
    # full package version
748
    if constants.RELEASE_VERSION != remote_version[1]:
749
      feedback_fn("  - WARNING: software version mismatch: master %s,"
750
                  " node %s %s" %
751
                  (constants.RELEASE_VERSION, node, remote_version[1]))
752

    
753
    # checks vg existence and size > 20G
754
    if vg_name is not None:
755
      vglist = node_result.get(constants.NV_VGLIST, None)
756
      if not vglist:
757
        feedback_fn("  - ERROR: unable to check volume groups on node %s." %
758
                        (node,))
759
        bad = True
760
      else:
761
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
762
                                              constants.MIN_VG_SIZE)
763
        if vgstatus:
764
          feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
765
          bad = True
766

    
767
    # checks config file checksum
768

    
769
    remote_cksum = node_result.get(constants.NV_FILELIST, None)
770
    if not isinstance(remote_cksum, dict):
771
      bad = True
772
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
773
    else:
774
      for file_name in file_list:
775
        node_is_mc = nodeinfo.master_candidate
776
        must_have_file = file_name not in master_files
777
        if file_name not in remote_cksum:
778
          if node_is_mc or must_have_file:
779
            bad = True
780
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
781
        elif remote_cksum[file_name] != local_cksum[file_name]:
782
          if node_is_mc or must_have_file:
783
            bad = True
784
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
785
          else:
786
            # not candidate and this is not a must-have file
787
            bad = True
788
            feedback_fn("  - ERROR: non master-candidate has old/wrong file"
789
                        " '%s'" % file_name)
790
        else:
791
          # all good, except non-master/non-must have combination
792
          if not node_is_mc and not must_have_file:
793
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
794
                        " candidates" % file_name)
795

    
796
    # checks ssh to any
797

    
798
    if constants.NV_NODELIST not in node_result:
799
      bad = True
800
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
801
    else:
802
      if node_result[constants.NV_NODELIST]:
803
        bad = True
804
        for node in node_result[constants.NV_NODELIST]:
805
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
806
                          (node, node_result[constants.NV_NODELIST][node]))
807

    
808
    if constants.NV_NODENETTEST not in node_result:
809
      bad = True
810
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
811
    else:
812
      if node_result[constants.NV_NODENETTEST]:
813
        bad = True
814
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
815
        for node in nlist:
816
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
817
                          (node, node_result[constants.NV_NODENETTEST][node]))
818

    
819
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
820
    if isinstance(hyp_result, dict):
821
      for hv_name, hv_result in hyp_result.iteritems():
822
        if hv_result is not None:
823
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
824
                      (hv_name, hv_result))
825

    
826
    # check used drbd list
827
    if vg_name is not None:
828
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
829
      if not isinstance(used_minors, (tuple, list)):
830
        feedback_fn("  - ERROR: cannot parse drbd status file: %s" %
831
                    str(used_minors))
832
      else:
833
        for minor, (iname, must_exist) in drbd_map.items():
834
          if minor not in used_minors and must_exist:
835
            feedback_fn("  - ERROR: drbd minor %d of instance %s is"
836
                        " not active" % (minor, iname))
837
            bad = True
838
        for minor in used_minors:
839
          if minor not in drbd_map:
840
            feedback_fn("  - ERROR: unallocated drbd minor %d is in use" %
841
                        minor)
842
            bad = True
843

    
844
    return bad
845

    
846
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
847
                      node_instance, feedback_fn, n_offline):
848
    """Verify an instance.
849

850
    This function checks to see if the required block devices are
851
    available on the instance's node.
852

853
    """
854
    bad = False
855

    
856
    node_current = instanceconfig.primary_node
857

    
858
    node_vol_should = {}
859
    instanceconfig.MapLVsByNode(node_vol_should)
860

    
861
    for node in node_vol_should:
862
      if node in n_offline:
863
        # ignore missing volumes on offline nodes
864
        continue
865
      for volume in node_vol_should[node]:
866
        if node not in node_vol_is or volume not in node_vol_is[node]:
867
          feedback_fn("  - ERROR: volume %s missing on node %s" %
868
                          (volume, node))
869
          bad = True
870

    
871
    if instanceconfig.admin_up:
872
      if ((node_current not in node_instance or
873
          not instance in node_instance[node_current]) and
874
          node_current not in n_offline):
875
        feedback_fn("  - ERROR: instance %s not running on node %s" %
876
                        (instance, node_current))
877
        bad = True
878

    
879
    for node in node_instance:
880
      if (not node == node_current):
881
        if instance in node_instance[node]:
882
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
883
                          (instance, node))
884
          bad = True
885

    
886
    return bad
887

    
888
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
889
    """Verify if there are any unknown volumes in the cluster.
890

891
    The .os, .swap and backup volumes are ignored. All other volumes are
892
    reported as unknown.
893

894
    """
895
    bad = False
896

    
897
    for node in node_vol_is:
898
      for volume in node_vol_is[node]:
899
        if node not in node_vol_should or volume not in node_vol_should[node]:
900
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
901
                      (volume, node))
902
          bad = True
903
    return bad
904

    
905
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
906
    """Verify the list of running instances.
907

908
    This checks what instances are running but unknown to the cluster.
909

910
    """
911
    bad = False
912
    for node in node_instance:
913
      for runninginstance in node_instance[node]:
914
        if runninginstance not in instancelist:
915
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
916
                          (runninginstance, node))
917
          bad = True
918
    return bad
919

    
920
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
921
    """Verify N+1 Memory Resilience.
922

923
    Check that if one single node dies we can still start all the instances it
924
    was primary for.
925

926
    """
927
    bad = False
928

    
929
    for node, nodeinfo in node_info.iteritems():
930
      # This code checks that every node which is now listed as secondary has
931
      # enough memory to host all instances it is supposed to should a single
932
      # other node in the cluster fail.
933
      # FIXME: not ready for failover to an arbitrary node
934
      # FIXME: does not support file-backed instances
935
      # WARNING: we currently take into account down instances as well as up
936
      # ones, considering that even if they're down someone might want to start
937
      # them even in the event of a node failure.
938
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
939
        needed_mem = 0
940
        for instance in instances:
941
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
942
          if bep[constants.BE_AUTO_BALANCE]:
943
            needed_mem += bep[constants.BE_MEMORY]
944
        if nodeinfo['mfree'] < needed_mem:
945
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
946
                      " failovers should node %s fail" % (node, prinode))
947
          bad = True
948
    return bad
949

    
950
  def CheckPrereq(self):
951
    """Check prerequisites.
952

953
    Transform the list of checks we're going to skip into a set and check that
954
    all its members are valid.
955

956
    """
957
    self.skip_set = frozenset(self.op.skip_checks)
958
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
959
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
960

    
961
  def BuildHooksEnv(self):
962
    """Build hooks env.
963

964
    Cluster-Verify hooks just rone in the post phase and their failure makes
965
    the output be logged in the verify output and the verification to fail.
966

967
    """
968
    all_nodes = self.cfg.GetNodeList()
969
    env = {
970
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
971
      }
972
    for node in self.cfg.GetAllNodesInfo().values():
973
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
974

    
975
    return env, [], all_nodes
976

    
977
  def Exec(self, feedback_fn):
978
    """Verify integrity of cluster, performing various test on nodes.
979

980
    """
981
    bad = False
982
    feedback_fn("* Verifying global settings")
983
    for msg in self.cfg.VerifyConfig():
984
      feedback_fn("  - ERROR: %s" % msg)
985

    
986
    vg_name = self.cfg.GetVGName()
987
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
988
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
989
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
990
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
991
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
992
                        for iname in instancelist)
993
    i_non_redundant = [] # Non redundant instances
994
    i_non_a_balanced = [] # Non auto-balanced instances
995
    n_offline = [] # List of offline nodes
996
    n_drained = [] # List of nodes being drained
997
    node_volume = {}
998
    node_instance = {}
999
    node_info = {}
1000
    instance_cfg = {}
1001

    
1002
    # FIXME: verify OS list
1003
    # do local checksums
1004
    master_files = [constants.CLUSTER_CONF_FILE]
1005

    
1006
    file_names = ssconf.SimpleStore().GetFileList()
1007
    file_names.append(constants.SSL_CERT_FILE)
1008
    file_names.append(constants.RAPI_CERT_FILE)
1009
    file_names.extend(master_files)
1010

    
1011
    local_checksums = utils.FingerprintFiles(file_names)
1012

    
1013
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
1014
    node_verify_param = {
1015
      constants.NV_FILELIST: file_names,
1016
      constants.NV_NODELIST: [node.name for node in nodeinfo
1017
                              if not node.offline],
1018
      constants.NV_HYPERVISOR: hypervisors,
1019
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
1020
                                  node.secondary_ip) for node in nodeinfo
1021
                                 if not node.offline],
1022
      constants.NV_INSTANCELIST: hypervisors,
1023
      constants.NV_VERSION: None,
1024
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
1025
      }
1026
    if vg_name is not None:
1027
      node_verify_param[constants.NV_VGLIST] = None
1028
      node_verify_param[constants.NV_LVLIST] = vg_name
1029
      node_verify_param[constants.NV_DRBDLIST] = None
1030
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
1031
                                           self.cfg.GetClusterName())
1032

    
1033
    cluster = self.cfg.GetClusterInfo()
1034
    master_node = self.cfg.GetMasterNode()
1035
    all_drbd_map = self.cfg.ComputeDRBDMap()
1036

    
1037
    for node_i in nodeinfo:
1038
      node = node_i.name
1039

    
1040
      if node_i.offline:
1041
        feedback_fn("* Skipping offline node %s" % (node,))
1042
        n_offline.append(node)
1043
        continue
1044

    
1045
      if node == master_node:
1046
        ntype = "master"
1047
      elif node_i.master_candidate:
1048
        ntype = "master candidate"
1049
      elif node_i.drained:
1050
        ntype = "drained"
1051
        n_drained.append(node)
1052
      else:
1053
        ntype = "regular"
1054
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1055

    
1056
      msg = all_nvinfo[node].fail_msg
1057
      if msg:
1058
        feedback_fn("  - ERROR: while contacting node %s: %s" % (node, msg))
1059
        bad = True
1060
        continue
1061

    
1062
      nresult = all_nvinfo[node].payload
1063
      node_drbd = {}
1064
      for minor, instance in all_drbd_map[node].items():
1065
        if instance not in instanceinfo:
1066
          feedback_fn("  - ERROR: ghost instance '%s' in temporary DRBD map" %
1067
                      instance)
1068
          # ghost instance should not be running, but otherwise we
1069
          # don't give double warnings (both ghost instance and
1070
          # unallocated minor in use)
1071
          node_drbd[minor] = (instance, False)
1072
        else:
1073
          instance = instanceinfo[instance]
1074
          node_drbd[minor] = (instance.name, instance.admin_up)
1075
      result = self._VerifyNode(node_i, file_names, local_checksums,
1076
                                nresult, feedback_fn, master_files,
1077
                                node_drbd, vg_name)
1078
      bad = bad or result
1079

    
1080
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1081
      if vg_name is None:
1082
        node_volume[node] = {}
1083
      elif isinstance(lvdata, basestring):
1084
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
1085
                    (node, utils.SafeEncode(lvdata)))
1086
        bad = True
1087
        node_volume[node] = {}
1088
      elif not isinstance(lvdata, dict):
1089
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
1090
        bad = True
1091
        continue
1092
      else:
1093
        node_volume[node] = lvdata
1094

    
1095
      # node_instance
1096
      idata = nresult.get(constants.NV_INSTANCELIST, None)
1097
      if not isinstance(idata, list):
1098
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
1099
                    (node,))
1100
        bad = True
1101
        continue
1102

    
1103
      node_instance[node] = idata
1104

    
1105
      # node_info
1106
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1107
      if not isinstance(nodeinfo, dict):
1108
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
1109
        bad = True
1110
        continue
1111

    
1112
      try:
1113
        node_info[node] = {
1114
          "mfree": int(nodeinfo['memory_free']),
1115
          "pinst": [],
1116
          "sinst": [],
1117
          # dictionary holding all instances this node is secondary for,
1118
          # grouped by their primary node. Each key is a cluster node, and each
1119
          # value is a list of instances which have the key as primary and the
1120
          # current node as secondary.  this is handy to calculate N+1 memory
1121
          # availability if you can only failover from a primary to its
1122
          # secondary.
1123
          "sinst-by-pnode": {},
1124
        }
1125
        # FIXME: devise a free space model for file based instances as well
1126
        if vg_name is not None:
1127
          if (constants.NV_VGLIST not in nresult or
1128
              vg_name not in nresult[constants.NV_VGLIST]):
1129
            feedback_fn("  - ERROR: node %s didn't return data for the"
1130
                        " volume group '%s' - it is either missing or broken" %
1131
                        (node, vg_name))
1132
            bad = True
1133
            continue
1134
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
1135
      except (ValueError, KeyError):
1136
        feedback_fn("  - ERROR: invalid nodeinfo value returned"
1137
                    " from node %s" % (node,))
1138
        bad = True
1139
        continue
1140

    
1141
    node_vol_should = {}
1142

    
1143
    for instance in instancelist:
1144
      feedback_fn("* Verifying instance %s" % instance)
1145
      inst_config = instanceinfo[instance]
1146
      result =  self._VerifyInstance(instance, inst_config, node_volume,
1147
                                     node_instance, feedback_fn, n_offline)
1148
      bad = bad or result
1149
      inst_nodes_offline = []
1150

    
1151
      inst_config.MapLVsByNode(node_vol_should)
1152

    
1153
      instance_cfg[instance] = inst_config
1154

    
1155
      pnode = inst_config.primary_node
1156
      if pnode in node_info:
1157
        node_info[pnode]['pinst'].append(instance)
1158
      elif pnode not in n_offline:
1159
        feedback_fn("  - ERROR: instance %s, connection to primary node"
1160
                    " %s failed" % (instance, pnode))
1161
        bad = True
1162

    
1163
      if pnode in n_offline:
1164
        inst_nodes_offline.append(pnode)
1165

    
1166
      # If the instance is non-redundant we cannot survive losing its primary
1167
      # node, so we are not N+1 compliant. On the other hand we have no disk
1168
      # templates with more than one secondary so that situation is not well
1169
      # supported either.
1170
      # FIXME: does not support file-backed instances
1171
      if len(inst_config.secondary_nodes) == 0:
1172
        i_non_redundant.append(instance)
1173
      elif len(inst_config.secondary_nodes) > 1:
1174
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
1175
                    % instance)
1176

    
1177
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1178
        i_non_a_balanced.append(instance)
1179

    
1180
      for snode in inst_config.secondary_nodes:
1181
        if snode in node_info:
1182
          node_info[snode]['sinst'].append(instance)
1183
          if pnode not in node_info[snode]['sinst-by-pnode']:
1184
            node_info[snode]['sinst-by-pnode'][pnode] = []
1185
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1186
        elif snode not in n_offline:
1187
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
1188
                      " %s failed" % (instance, snode))
1189
          bad = True
1190
        if snode in n_offline:
1191
          inst_nodes_offline.append(snode)
1192

    
1193
      if inst_nodes_offline:
1194
        # warn that the instance lives on offline nodes, and set bad=True
1195
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
1196
                    ", ".join(inst_nodes_offline))
1197
        bad = True
1198

    
1199
    feedback_fn("* Verifying orphan volumes")
1200
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1201
                                       feedback_fn)
1202
    bad = bad or result
1203

    
1204
    feedback_fn("* Verifying remaining instances")
1205
    result = self._VerifyOrphanInstances(instancelist, node_instance,
1206
                                         feedback_fn)
1207
    bad = bad or result
1208

    
1209
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1210
      feedback_fn("* Verifying N+1 Memory redundancy")
1211
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1212
      bad = bad or result
1213

    
1214
    feedback_fn("* Other Notes")
1215
    if i_non_redundant:
1216
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1217
                  % len(i_non_redundant))
1218

    
1219
    if i_non_a_balanced:
1220
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1221
                  % len(i_non_a_balanced))
1222

    
1223
    if n_offline:
1224
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1225

    
1226
    if n_drained:
1227
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
1228

    
1229
    return not bad
1230

    
1231
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
1232
    """Analize the post-hooks' result
1233

1234
    This method analyses the hook result, handles it, and sends some
1235
    nicely-formatted feedback back to the user.
1236

1237
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
1238
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
1239
    @param hooks_results: the results of the multi-node hooks rpc call
1240
    @param feedback_fn: function used send feedback back to the caller
1241
    @param lu_result: previous Exec result
1242
    @return: the new Exec result, based on the previous result
1243
        and hook results
1244

1245
    """
1246
    # We only really run POST phase hooks, and are only interested in
1247
    # their results
1248
    if phase == constants.HOOKS_PHASE_POST:
1249
      # Used to change hooks' output to proper indentation
1250
      indent_re = re.compile('^', re.M)
1251
      feedback_fn("* Hooks Results")
1252
      if not hooks_results:
1253
        feedback_fn("  - ERROR: general communication failure")
1254
        lu_result = 1
1255
      else:
1256
        for node_name in hooks_results:
1257
          show_node_header = True
1258
          res = hooks_results[node_name]
1259
          msg = res.fail_msg
1260
          if msg:
1261
            if res.offline:
1262
              # no need to warn or set fail return value
1263
              continue
1264
            feedback_fn("    Communication failure in hooks execution: %s" %
1265
                        msg)
1266
            lu_result = 1
1267
            continue
1268
          for script, hkr, output in res.payload:
1269
            if hkr == constants.HKR_FAIL:
1270
              # The node header is only shown once, if there are
1271
              # failing hooks on that node
1272
              if show_node_header:
1273
                feedback_fn("  Node %s:" % node_name)
1274
                show_node_header = False
1275
              feedback_fn("    ERROR: Script %s failed, output:" % script)
1276
              output = indent_re.sub('      ', output)
1277
              feedback_fn("%s" % output)
1278
              lu_result = 1
1279

    
1280
      return lu_result
1281

    
1282

    
1283
class LUVerifyDisks(NoHooksLU):
1284
  """Verifies the cluster disks status.
1285

1286
  """
1287
  _OP_REQP = []
1288
  REQ_BGL = False
1289

    
1290
  def ExpandNames(self):
1291
    self.needed_locks = {
1292
      locking.LEVEL_NODE: locking.ALL_SET,
1293
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1294
    }
1295
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
1296

    
1297
  def CheckPrereq(self):
1298
    """Check prerequisites.
1299

1300
    This has no prerequisites.
1301

1302
    """
1303
    pass
1304

    
1305
  def Exec(self, feedback_fn):
1306
    """Verify integrity of cluster disks.
1307

1308
    @rtype: tuple of three items
1309
    @return: a tuple of (dict of node-to-node_error, list of instances
1310
        which need activate-disks, dict of instance: (node, volume) for
1311
        missing volumes
1312

1313
    """
1314
    result = res_nodes, res_instances, res_missing = {}, [], {}
1315

    
1316
    vg_name = self.cfg.GetVGName()
1317
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1318
    instances = [self.cfg.GetInstanceInfo(name)
1319
                 for name in self.cfg.GetInstanceList()]
1320

    
1321
    nv_dict = {}
1322
    for inst in instances:
1323
      inst_lvs = {}
1324
      if (not inst.admin_up or
1325
          inst.disk_template not in constants.DTS_NET_MIRROR):
1326
        continue
1327
      inst.MapLVsByNode(inst_lvs)
1328
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1329
      for node, vol_list in inst_lvs.iteritems():
1330
        for vol in vol_list:
1331
          nv_dict[(node, vol)] = inst
1332

    
1333
    if not nv_dict:
1334
      return result
1335

    
1336
    node_lvs = self.rpc.call_volume_list(nodes, vg_name)
1337

    
1338
    to_act = set()
1339
    for node in nodes:
1340
      # node_volume
1341
      node_res = node_lvs[node]
1342
      if node_res.offline:
1343
        continue
1344
      msg = node_res.fail_msg
1345
      if msg:
1346
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
1347
        res_nodes[node] = msg
1348
        continue
1349

    
1350
      lvs = node_res.payload
1351
      for lv_name, (_, lv_inactive, lv_online) in lvs.items():
1352
        inst = nv_dict.pop((node, lv_name), None)
1353
        if (not lv_online and inst is not None
1354
            and inst.name not in res_instances):
1355
          res_instances.append(inst.name)
1356

    
1357
    # any leftover items in nv_dict are missing LVs, let's arrange the
1358
    # data better
1359
    for key, inst in nv_dict.iteritems():
1360
      if inst.name not in res_missing:
1361
        res_missing[inst.name] = []
1362
      res_missing[inst.name].append(key)
1363

    
1364
    return result
1365

    
1366

    
1367
class LURenameCluster(LogicalUnit):
1368
  """Rename the cluster.
1369

1370
  """
1371
  HPATH = "cluster-rename"
1372
  HTYPE = constants.HTYPE_CLUSTER
1373
  _OP_REQP = ["name"]
1374

    
1375
  def BuildHooksEnv(self):
1376
    """Build hooks env.
1377

1378
    """
1379
    env = {
1380
      "OP_TARGET": self.cfg.GetClusterName(),
1381
      "NEW_NAME": self.op.name,
1382
      }
1383
    mn = self.cfg.GetMasterNode()
1384
    return env, [mn], [mn]
1385

    
1386
  def CheckPrereq(self):
1387
    """Verify that the passed name is a valid one.
1388

1389
    """
1390
    hostname = utils.HostInfo(self.op.name)
1391

    
1392
    new_name = hostname.name
1393
    self.ip = new_ip = hostname.ip
1394
    old_name = self.cfg.GetClusterName()
1395
    old_ip = self.cfg.GetMasterIP()
1396
    if new_name == old_name and new_ip == old_ip:
1397
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1398
                                 " cluster has changed")
1399
    if new_ip != old_ip:
1400
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1401
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1402
                                   " reachable on the network. Aborting." %
1403
                                   new_ip)
1404

    
1405
    self.op.name = new_name
1406

    
1407
  def Exec(self, feedback_fn):
1408
    """Rename the cluster.
1409

1410
    """
1411
    clustername = self.op.name
1412
    ip = self.ip
1413

    
1414
    # shutdown the master IP
1415
    master = self.cfg.GetMasterNode()
1416
    result = self.rpc.call_node_stop_master(master, False)
1417
    result.Raise("Could not disable the master role")
1418

    
1419
    try:
1420
      cluster = self.cfg.GetClusterInfo()
1421
      cluster.cluster_name = clustername
1422
      cluster.master_ip = ip
1423
      self.cfg.Update(cluster)
1424

    
1425
      # update the known hosts file
1426
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
1427
      node_list = self.cfg.GetNodeList()
1428
      try:
1429
        node_list.remove(master)
1430
      except ValueError:
1431
        pass
1432
      result = self.rpc.call_upload_file(node_list,
1433
                                         constants.SSH_KNOWN_HOSTS_FILE)
1434
      for to_node, to_result in result.iteritems():
1435
        msg = to_result.fail_msg
1436
        if msg:
1437
          msg = ("Copy of file %s to node %s failed: %s" %
1438
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
1439
          self.proc.LogWarning(msg)
1440

    
1441
    finally:
1442
      result = self.rpc.call_node_start_master(master, False)
1443
      msg = result.fail_msg
1444
      if msg:
1445
        self.LogWarning("Could not re-enable the master role on"
1446
                        " the master, please restart manually: %s", msg)
1447

    
1448

    
1449
def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether an LD_LV dev_type was found or not

  """
  if disk.children:
    for chdisk in disk.children:
      if _RecursiveCheckIfLVMBased(chdisk):
        return True
  return disk.dev_type == constants.LD_LV


class LUSetClusterParams(LogicalUnit):
1466
  """Change the parameters of the cluster.
1467

1468
  """
1469
  HPATH = "cluster-modify"
1470
  HTYPE = constants.HTYPE_CLUSTER
1471
  _OP_REQP = []
1472
  REQ_BGL = False
1473

    
1474
  def CheckArguments(self):
1475
    """Check parameters
1476

1477
    """
1478
    if not hasattr(self.op, "candidate_pool_size"):
1479
      self.op.candidate_pool_size = None
1480
    if self.op.candidate_pool_size is not None:
1481
      try:
1482
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
1483
      except (ValueError, TypeError), err:
1484
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
1485
                                   str(err))
1486
      if self.op.candidate_pool_size < 1:
1487
        raise errors.OpPrereqError("At least one master candidate needed")
1488

    
1489
  def ExpandNames(self):
1490
    # FIXME: in the future maybe other cluster params won't require checking on
1491
    # all nodes to be modified.
1492
    self.needed_locks = {
1493
      locking.LEVEL_NODE: locking.ALL_SET,
1494
    }
1495
    self.share_locks[locking.LEVEL_NODE] = 1
1496

    
1497
  def BuildHooksEnv(self):
1498
    """Build hooks env.
1499

1500
    """
1501
    env = {
1502
      "OP_TARGET": self.cfg.GetClusterName(),
1503
      "NEW_VG_NAME": self.op.vg_name,
1504
      }
1505
    mn = self.cfg.GetMasterNode()
1506
    return env, [mn], [mn]
1507

    
1508
  def CheckPrereq(self):
1509
    """Check prerequisites.
1510

1511
    This checks that the given parameters don't conflict and that
    the given volume group is valid.
1513

1514
    """
1515
    if self.op.vg_name is not None and not self.op.vg_name:
1516
      instances = self.cfg.GetAllInstancesInfo().values()
1517
      for inst in instances:
1518
        for disk in inst.disks:
1519
          if _RecursiveCheckIfLVMBased(disk):
1520
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1521
                                       " lvm-based instances exist")
1522

    
1523
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1524

    
1525
    # if vg_name not None, checks given volume group on all nodes
1526
    if self.op.vg_name:
1527
      vglist = self.rpc.call_vg_list(node_list)
1528
      for node in node_list:
1529
        msg = vglist[node].fail_msg
1530
        if msg:
1531
          # ignoring down node
1532
          self.LogWarning("Error while gathering data on node %s"
1533
                          " (ignoring node): %s", node, msg)
1534
          continue
1535
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
1536
                                              self.op.vg_name,
1537
                                              constants.MIN_VG_SIZE)
1538
        if vgstatus:
1539
          raise errors.OpPrereqError("Error on node '%s': %s" %
1540
                                     (node, vgstatus))
1541

    
1542
    self.cluster = cluster = self.cfg.GetClusterInfo()
1543
    # validate params changes
1544
    if self.op.beparams:
1545
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
1546
      self.new_beparams = objects.FillDict(
1547
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)
1548

    
1549
    if self.op.nicparams:
1550
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
1551
      self.new_nicparams = objects.FillDict(
1552
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
1553
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
1554

    
1555
    # hypervisor list/parameters
1556
    self.new_hvparams = objects.FillDict(cluster.hvparams, {})
1557
    if self.op.hvparams:
1558
      if not isinstance(self.op.hvparams, dict):
1559
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
1560
      for hv_name, hv_dict in self.op.hvparams.items():
1561
        if hv_name not in self.new_hvparams:
1562
          self.new_hvparams[hv_name] = hv_dict
1563
        else:
1564
          self.new_hvparams[hv_name].update(hv_dict)
1565

    
1566
    if self.op.enabled_hypervisors is not None:
1567
      self.hv_list = self.op.enabled_hypervisors
1568
    else:
1569
      self.hv_list = cluster.enabled_hypervisors
1570

    
1571
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
1572
      # either the enabled list has changed, or the parameters have, validate
1573
      for hv_name, hv_params in self.new_hvparams.items():
1574
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
1575
            (self.op.enabled_hypervisors and
1576
             hv_name in self.op.enabled_hypervisors)):
1577
          # either this is a new hypervisor, or its parameters have changed
1578
          hv_class = hypervisor.GetHypervisor(hv_name)
1579
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1580
          hv_class.CheckParameterSyntax(hv_params)
1581
          _CheckHVParams(self, node_list, hv_name, hv_params)
1582

    
1583
  def Exec(self, feedback_fn):
1584
    """Change the parameters of the cluster.
1585

1586
    """
1587
    if self.op.vg_name is not None:
1588
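      # an empty vg_name string (as opposed to None) means disabling LVM storage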
      new_volume = self.op.vg_name
1589
      if not new_volume:
1590
        new_volume = None
1591
      if new_volume != self.cfg.GetVGName():
1592
        self.cfg.SetVGName(new_volume)
1593
      else:
1594
        feedback_fn("Cluster LVM configuration already in desired"
1595
                    " state, not changing")
1596
    if self.op.hvparams:
1597
      self.cluster.hvparams = self.new_hvparams
1598
    if self.op.enabled_hypervisors is not None:
1599
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1600
    if self.op.beparams:
1601
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
1602
    if self.op.nicparams:
1603
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
1604

    
1605
    if self.op.candidate_pool_size is not None:
1606
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
1607

    
1608
    self.cfg.Update(self.cluster)
1609

    
1610
    # we want to update nodes after the cluster so that if any errors
1611
    # happen, we have recorded and saved the cluster info
1612
    if self.op.candidate_pool_size is not None:
1613
      _AdjustCandidatePool(self)
1614

    
1615

    
1616
def _RedistributeAncillaryFiles(lu, additional_nodes=None):
1617
  """Distribute additional files which are part of the cluster configuration.
1618

1619
  ConfigWriter takes care of distributing the config and ssconf files, but
1620
  there are more files which should be distributed to all nodes. This function
1621
  makes sure those are copied.
1622

1623
  @param lu: calling logical unit
1624
  @param additional_nodes: list of nodes not in the config to distribute to
1625

1626
  """
1627
  # 1. Gather target nodes
1628
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
1629
  dist_nodes = lu.cfg.GetNodeList()
1630
  if additional_nodes is not None:
1631
    dist_nodes.extend(additional_nodes)
1632
  if myself.name in dist_nodes:
1633
    dist_nodes.remove(myself.name)
1634
  # 2. Gather files to distribute
1635
  dist_files = set([constants.ETC_HOSTS,
1636
                    constants.SSH_KNOWN_HOSTS_FILE,
1637
                    constants.RAPI_CERT_FILE,
1638
                    constants.RAPI_USERS_FILE,
1639
                   ])
1640

    
1641
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
1642
  for hv_name in enabled_hypervisors:
1643
    hv_class = hypervisor.GetHypervisor(hv_name)
1644
    dist_files.update(hv_class.GetAncillaryFiles())
1645

    
1646
  # 3. Perform the files upload
1647
  for fname in dist_files:
1648
    if os.path.exists(fname):
1649
      result = lu.rpc.call_upload_file(dist_nodes, fname)
1650
      for to_node, to_result in result.items():
1651
        msg = to_result.fail_msg
1652
        if msg:
1653
          msg = ("Copy of file %s to node %s failed: %s" %
1654
                 (fname, to_node, msg))
1655
          lu.proc.LogWarning(msg)
1656

    
1657

    
1658
class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo())
    _RedistributeAncillaryFiles(self)


def _WaitForSync(lu, instance, oneshot=False, unlock=False):
1687
  """Sleep and poll for an instance's disk to sync.
1688

1689
  """
1690
  if not instance.disks:
1691
    return True
1692

    
1693
  if not oneshot:
1694
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1695

    
1696
  node = instance.primary_node
1697

    
1698
  for dev in instance.disks:
1699
    lu.cfg.SetDiskID(dev, node)
1700

    
1701
  retries = 0
1702
  degr_retries = 10 # in seconds, as we sleep 1 second each time
1703
  while True:
1704
    max_time = 0
1705
    done = True
1706
    cumul_degraded = False
1707
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1708
    msg = rstats.fail_msg
1709
    if msg:
1710
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
1711
      retries += 1
1712
      if retries >= 10:
1713
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1714
                                 " aborting." % node)
1715
      time.sleep(6)
1716
      continue
1717
    rstats = rstats.payload
1718
    retries = 0
1719
    for i, mstat in enumerate(rstats):
1720
      if mstat is None:
1721
        lu.LogWarning("Can't compute data for node %s/%s",
1722
                           node, instance.disks[i].iv_name)
1723
        continue
1724
      # we ignore the ldisk parameter
1725
      perc_done, est_time, is_degraded, _ = mstat
1726
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1727
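      # a non-None perc_done means this device is still resyncing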
      if perc_done is not None:
1728
        done = False
1729
        if est_time is not None:
1730
          rem_time = "%d estimated seconds remaining" % est_time
1731
          max_time = est_time
1732
        else:
1733
          rem_time = "no time estimate"
1734
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
1735
                        (instance.disks[i].iv_name, perc_done, rem_time))
1736

    
1737
    # if we're done but degraded, let's do a few small retries, to
1738
    # make sure we see a stable and not transient situation; therefore
1739
    # we force restart of the loop
1740
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
1741
      logging.info("Degraded disks found, %d retries left", degr_retries)
1742
      degr_retries -= 1
1743
      time.sleep(1)
1744
      continue
1745

    
1746
    if done or oneshot:
1747
      break
1748

    
1749
    time.sleep(min(60, max_time))
1750

    
1751
  if done:
1752
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1753
  return not cumul_degraded
1754

    
1755

    
1756
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1757
  """Check that mirrors are not degraded.
1758

1759
  The ldisk parameter, if True, will change the test from the
1760
  is_degraded attribute (which represents overall non-ok status for
1761
  the device(s)) to the ldisk (representing the local storage status).
1762

1763
  """
1764
  lu.cfg.SetDiskID(dev, node)
1765
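  # per the docstring above: index 5 in the blockdev_find payload is the
  # overall is_degraded flag, index 6 the local-storage (ldisk) status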
  if ldisk:
1766
    idx = 6
1767
  else:
1768
    idx = 5
1769

    
1770
  result = True
1771
  if on_primary or dev.AssembleOnSecondary():
1772
    rstats = lu.rpc.call_blockdev_find(node, dev)
1773
    msg = rstats.fail_msg
1774
    if msg:
1775
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
1776
      result = False
1777
    elif not rstats.payload:
1778
      lu.LogWarning("Can't find disk on node %s", node)
1779
      result = False
1780
    else:
1781
      result = result and (not rstats.payload[idx])
1782
  if dev.children:
1783
    for child in dev.children:
1784
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1785

    
1786
  return result
1787

    
1788

    
1789
class LUDiagnoseOS(NoHooksLU):
1790
  """Logical unit for OS diagnose/query.
1791

1792
  """
1793
  _OP_REQP = ["output_fields", "names"]
1794
  REQ_BGL = False
1795
  _FIELDS_STATIC = utils.FieldSet()
1796
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")
1797

    
1798
  def ExpandNames(self):
1799
    if self.op.names:
1800
      raise errors.OpPrereqError("Selective OS query not supported")
1801

    
1802
    _CheckOutputFields(static=self._FIELDS_STATIC,
1803
                       dynamic=self._FIELDS_DYNAMIC,
1804
                       selected=self.op.output_fields)
1805

    
1806
    # Lock all nodes, in shared mode
1807
    # Temporary removal of locks, should be reverted later
1808
    # TODO: reintroduce locks when they are lighter-weight
1809
    self.needed_locks = {}
1810
    #self.share_locks[locking.LEVEL_NODE] = 1
1811
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1812

    
1813
  def CheckPrereq(self):
1814
    """Check prerequisites.
1815

1816
    """
1817

    
1818
  @staticmethod
1819
  def _DiagnoseByOS(node_list, rlist):
1820
    """Remaps a per-node return list into an a per-os per-node dictionary
1821

1822
    @param node_list: a list with the names of all nodes
1823
    @param rlist: a map with node names as keys and OS objects as values
1824

1825
    @rtype: dict
1826
    @return: a dictionary with osnames as keys and as value another map, with
1827
        nodes as keys and tuples of (path, status, diagnose) as values, eg::
1828

1829
          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
1830
                                     (/srv/..., False, "invalid api")],
1831
                           "node2": [(/srv/..., True, "")]}
1832
          }
1833

1834
    """
1835
    all_os = {}
1836
    # we build here the list of nodes that didn't fail the RPC (at RPC
1837
    # level), so that nodes with a non-responding node daemon don't
1838
    # make all OSes invalid
1839
    good_nodes = [node_name for node_name in rlist
1840
                  if not rlist[node_name].fail_msg]
1841
    for node_name, nr in rlist.items():
1842
      if nr.fail_msg or not nr.payload:
1843
        continue
1844
      for name, path, status, diagnose in nr.payload:
1845
        if name not in all_os:
1846
          # build a list of nodes for this os containing empty lists
1847
          # for each node in node_list
1848
          all_os[name] = {}
1849
          for nname in good_nodes:
1850
            all_os[name][nname] = []
1851
        all_os[name][node_name].append((path, status, diagnose))
1852
    return all_os
1853

    
1854
  def Exec(self, feedback_fn):
1855
    """Compute the list of OSes.
1856

1857
    """
1858
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
1859
    node_data = self.rpc.call_os_diagnose(valid_nodes)
1860
    pol = self._DiagnoseByOS(valid_nodes, node_data)
1861
    output = []
1862
    for os_name, os_data in pol.items():
1863
      row = []
1864
      for field in self.op.output_fields:
1865
        if field == "name":
1866
          val = os_name
1867
        elif field == "valid":
1868
          val = utils.all([osl and osl[0][1] for osl in os_data.values()])
1869
        elif field == "node_status":
1870
          # this is just a copy of the dict
1871
          val = {}
1872
          for node_name, nos_list in os_data.items():
1873
            val[node_name] = nos_list
1874
        else:
1875
          raise errors.ParameterError(field)
1876
        row.append(val)
1877
      output.append(row)
1878

    
1879
    return output
1880

    
1881

    
1882
class LURemoveNode(LogicalUnit):
1883
  """Logical unit for removing a node.
1884

1885
  """
1886
  HPATH = "node-remove"
1887
  HTYPE = constants.HTYPE_NODE
1888
  _OP_REQP = ["node_name"]
1889

    
1890
  def BuildHooksEnv(self):
1891
    """Build hooks env.
1892

1893
    This doesn't run on the target node in the pre phase as a failed
1894
    node would then be impossible to remove.
1895

1896
    """
1897
    env = {
1898
      "OP_TARGET": self.op.node_name,
1899
      "NODE_NAME": self.op.node_name,
1900
      }
1901
    all_nodes = self.cfg.GetNodeList()
1902
    all_nodes.remove(self.op.node_name)
1903
    return env, all_nodes, all_nodes
1904

    
1905
  def CheckPrereq(self):
1906
    """Check prerequisites.
1907

1908
    This checks:
1909
     - the node exists in the configuration
1910
     - it does not have primary or secondary instances
1911
     - it's not the master
1912

1913
    Any errors are signalled by raising errors.OpPrereqError.
1914

1915
    """
1916
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1917
    if node is None:
1918
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1919

    
1920
    instance_list = self.cfg.GetInstanceList()
1921

    
1922
    masternode = self.cfg.GetMasterNode()
1923
    if node.name == masternode:
1924
      raise errors.OpPrereqError("Node is the master node,"
1925
                                 " you need to failover first.")
1926

    
1927
    for instance_name in instance_list:
1928
      instance = self.cfg.GetInstanceInfo(instance_name)
1929
      if node.name in instance.all_nodes:
1930
        raise errors.OpPrereqError("Instance %s is still running on the node,"
1931
                                   " please remove first." % instance_name)
1932
    self.op.node_name = node.name
1933
    self.node = node
1934

    
1935
  def Exec(self, feedback_fn):
1936
    """Removes the node from the cluster.
1937

1938
    """
1939
    node = self.node
1940
    logging.info("Stopping the node daemon and removing configs from node %s",
1941
                 node.name)
1942

    
1943
    self.context.RemoveNode(node.name)
1944

    
1945
    result = self.rpc.call_node_leave_cluster(node.name)
1946
    msg = result.fail_msg
1947
    if msg:
1948
      self.LogWarning("Errors encountered on the remote node while leaving"
1949
                      " the cluster: %s", msg)
1950

    
1951
    # Promote nodes to master candidate as needed
1952
    _AdjustCandidatePool(self)
1953

    
1954

    
1955
class LUQueryNodes(NoHooksLU):
1956
  """Logical unit for querying nodes.
1957

1958
  """
1959
  _OP_REQP = ["output_fields", "names", "use_locking"]
1960
  REQ_BGL = False
1961
  _FIELDS_DYNAMIC = utils.FieldSet(
1962
    "dtotal", "dfree",
1963
    "mtotal", "mnode", "mfree",
1964
    "bootid",
1965
    "ctotal", "cnodes", "csockets",
1966
    )
1967

    
1968
  _FIELDS_STATIC = utils.FieldSet(
1969
    "name", "pinst_cnt", "sinst_cnt",
1970
    "pinst_list", "sinst_list",
1971
    "pip", "sip", "tags",
1972
    "serial_no",
1973
    "master_candidate",
1974
    "master",
1975
    "offline",
1976
    "drained",
1977
    )
1978

    
1979
  def ExpandNames(self):
1980
    _CheckOutputFields(static=self._FIELDS_STATIC,
1981
                       dynamic=self._FIELDS_DYNAMIC,
1982
                       selected=self.op.output_fields)
1983

    
1984
    self.needed_locks = {}
1985
    self.share_locks[locking.LEVEL_NODE] = 1
1986

    
1987
    if self.op.names:
1988
      self.wanted = _GetWantedNodes(self, self.op.names)
1989
    else:
1990
      self.wanted = locking.ALL_SET
1991

    
1992
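    # live data (and thus node locking) is only needed if at least one
    # non-static field was requested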
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
1993
    self.do_locking = self.do_node_query and self.op.use_locking
1994
    if self.do_locking:
1995
      # if we don't request only static fields, we need to lock the nodes
1996
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
1997

    
1998

    
1999
  def CheckPrereq(self):
2000
    """Check prerequisites.
2001

2002
    """
2003
    # The validation of the node list is done in the _GetWantedNodes,
2004
    # if non empty, and if empty, there's no validation to do
2005
    pass
2006

    
2007
  def Exec(self, feedback_fn):
2008
    """Computes the list of nodes and their attributes.
2009

2010
    """
2011
    all_info = self.cfg.GetAllNodesInfo()
2012
    if self.do_locking:
2013
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
2014
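    # when not locking, the explicitly requested nodes may have been removed
    # in the meantime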
    elif self.wanted != locking.ALL_SET:
2015
      nodenames = self.wanted
2016
      missing = set(nodenames).difference(all_info.keys())
2017
      if missing:
2018
        raise errors.OpExecError(
2019
          "Some nodes were removed before retrieving their data: %s" % missing)
2020
    else:
2021
      nodenames = all_info.keys()
2022

    
2023
    nodenames = utils.NiceSort(nodenames)
2024
    nodelist = [all_info[name] for name in nodenames]
2025

    
2026
    # begin data gathering
2027

    
2028
    if self.do_node_query:
2029
      live_data = {}
2030
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
2031
                                          self.cfg.GetHypervisorType())
2032
      for name in nodenames:
2033
        nodeinfo = node_data[name]
2034
        if not nodeinfo.fail_msg and nodeinfo.payload:
2035
          nodeinfo = nodeinfo.payload
2036
          fn = utils.TryConvert
2037
          live_data[name] = {
2038
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
2039
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
2040
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
2041
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
2042
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
2043
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
2044
            "bootid": nodeinfo.get('bootid', None),
2045
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
2046
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
2047
            }
2048
        else:
2049
          live_data[name] = {}
2050
    else:
2051
      live_data = dict.fromkeys(nodenames, {})
2052

    
2053
    node_to_primary = dict([(name, set()) for name in nodenames])
2054
    node_to_secondary = dict([(name, set()) for name in nodenames])
2055

    
2056
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
2057
                             "sinst_cnt", "sinst_list"))
2058
    if inst_fields & frozenset(self.op.output_fields):
2059
      instancelist = self.cfg.GetInstanceList()
2060

    
2061
      for instance_name in instancelist:
2062
        inst = self.cfg.GetInstanceInfo(instance_name)
2063
        if inst.primary_node in node_to_primary:
2064
          node_to_primary[inst.primary_node].add(inst.name)
2065
        for secnode in inst.secondary_nodes:
2066
          if secnode in node_to_secondary:
2067
            node_to_secondary[secnode].add(inst.name)
2068

    
2069
    master_node = self.cfg.GetMasterNode()
2070

    
2071
    # end data gathering
2072

    
2073
    output = []
2074
    for node in nodelist:
2075
      node_output = []
2076
      for field in self.op.output_fields:
2077
        if field == "name":
2078
          val = node.name
2079
        elif field == "pinst_list":
2080
          val = list(node_to_primary[node.name])
2081
        elif field == "sinst_list":
2082
          val = list(node_to_secondary[node.name])
2083
        elif field == "pinst_cnt":
2084
          val = len(node_to_primary[node.name])
2085
        elif field == "sinst_cnt":
2086
          val = len(node_to_secondary[node.name])
2087
        elif field == "pip":
2088
          val = node.primary_ip
2089
        elif field == "sip":
2090
          val = node.secondary_ip
2091
        elif field == "tags":
2092
          val = list(node.GetTags())
2093
        elif field == "serial_no":
2094
          val = node.serial_no
2095
        elif field == "master_candidate":
2096
          val = node.master_candidate
2097
        elif field == "master":
2098
          val = node.name == master_node
2099
        elif field == "offline":
2100
          val = node.offline
2101
        elif field == "drained":
2102
          val = node.drained
2103
        elif self._FIELDS_DYNAMIC.Matches(field):
2104
          val = live_data[node.name].get(field, None)
2105
        else:
2106
          raise errors.ParameterError(field)
2107
        node_output.append(val)
2108
      output.append(node_output)
2109

    
2110
    return output
2111

    
2112

    
2113
class LUQueryNodeVolumes(NoHooksLU):
2114
  """Logical unit for getting volumes on node(s).
2115

2116
  """
2117
  _OP_REQP = ["nodes", "output_fields"]
2118
  REQ_BGL = False
2119
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
2120
  _FIELDS_STATIC = utils.FieldSet("node")
2121

    
2122
  def ExpandNames(self):
2123
    _CheckOutputFields(static=self._FIELDS_STATIC,
2124
                       dynamic=self._FIELDS_DYNAMIC,
2125
                       selected=self.op.output_fields)
2126

    
2127
    self.needed_locks = {}
2128
    self.share_locks[locking.LEVEL_NODE] = 1
2129
    if not self.op.nodes:
2130
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2131
    else:
2132
      self.needed_locks[locking.LEVEL_NODE] = \
2133
        _GetWantedNodes(self, self.op.nodes)
2134

    
2135
  def CheckPrereq(self):
2136
    """Check prerequisites.
2137

2138
    This checks that the fields required are valid output fields.
2139

2140
    """
2141
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
2142

    
2143
  def Exec(self, feedback_fn):
2144
    """Computes the list of nodes and their attributes.
2145

2146
    """
2147
    nodenames = self.nodes
2148
    volumes = self.rpc.call_node_volumes(nodenames)
2149

    
2150
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
2151
             in self.cfg.GetInstanceList()]
2152

    
2153
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
2154

    
2155
    output = []
2156
    for node in nodenames:
2157
      nresult = volumes[node]
2158
      if nresult.offline:
2159
        continue
2160
      msg = nresult.fail_msg
2161
      if msg:
2162
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
2163
        continue
2164

    
2165
      node_vols = nresult.payload[:]
2166
      node_vols.sort(key=lambda vol: vol['dev'])
2167

    
2168
      for vol in node_vols:
2169
        node_output = []
2170
        for field in self.op.output_fields:
2171
          if field == "node":
2172
            val = node
2173
          elif field == "phys":
2174
            val = vol['dev']
2175
          elif field == "vg":
2176
            val = vol['vg']
2177
          elif field == "name":
2178
            val = vol['name']
2179
          elif field == "size":
2180
            val = int(float(vol['size']))
2181
          elif field == "instance":
2182
            for inst in ilist:
2183
              if node not in lv_by_node[inst]:
2184
                continue
2185
              if vol['name'] in lv_by_node[inst][node]:
2186
                val = inst.name
2187
                break
2188
            else:
2189
              val = '-'
2190
          else:
2191
            raise errors.ParameterError(field)
2192
          node_output.append(str(val))
2193

    
2194
        output.append(node_output)
2195

    
2196
    return output
2197

    
2198

    
2199
class LUAddNode(LogicalUnit):
2200
  """Logical unit for adding node to the cluster.
2201

2202
  """
2203
  HPATH = "node-add"
2204
  HTYPE = constants.HTYPE_NODE
2205
  _OP_REQP = ["node_name"]
2206

    
2207
  def BuildHooksEnv(self):
2208
    """Build hooks env.
2209

2210
    This will run on all nodes before, and on all nodes + the new node after.
2211

2212
    """
2213
    env = {
2214
      "OP_TARGET": self.op.node_name,
2215
      "NODE_NAME": self.op.node_name,
2216
      "NODE_PIP": self.op.primary_ip,
2217
      "NODE_SIP": self.op.secondary_ip,
2218
      }
2219
    nodes_0 = self.cfg.GetNodeList()
2220
    nodes_1 = nodes_0 + [self.op.node_name, ]
2221
    return env, nodes_0, nodes_1
2222

    
2223
  def CheckPrereq(self):
2224
    """Check prerequisites.
2225

2226
    This checks:
2227
     - the new node is not already in the config
2228
     - it is resolvable
2229
     - its parameters (single/dual homed) match the cluster
2230

2231
    Any errors are signalled by raising errors.OpPrereqError.
2232

2233
    """
2234
    node_name = self.op.node_name
2235
    cfg = self.cfg
2236

    
2237
    dns_data = utils.HostInfo(node_name)
2238

    
2239
    node = dns_data.name
2240
    primary_ip = self.op.primary_ip = dns_data.ip
2241
    secondary_ip = getattr(self.op, "secondary_ip", None)
2242
    if secondary_ip is None:
2243
      secondary_ip = primary_ip
2244
    if not utils.IsValidIP(secondary_ip):
2245
      raise errors.OpPrereqError("Invalid secondary IP given")
2246
    self.op.secondary_ip = secondary_ip
2247

    
2248
    node_list = cfg.GetNodeList()
2249
    if not self.op.readd and node in node_list:
2250
      raise errors.OpPrereqError("Node %s is already in the configuration" %
2251
                                 node)
2252
    elif self.op.readd and node not in node_list:
2253
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
2254

    
2255
    for existing_node_name in node_list:
2256
      existing_node = cfg.GetNodeInfo(existing_node_name)
2257

    
2258
      if self.op.readd and node == existing_node_name:
2259
        if (existing_node.primary_ip != primary_ip or
2260
            existing_node.secondary_ip != secondary_ip):
2261
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
2262
                                     " address configuration as before")
2263
        continue
2264

    
2265
      if (existing_node.primary_ip == primary_ip or
2266
          existing_node.secondary_ip == primary_ip or
2267
          existing_node.primary_ip == secondary_ip or
2268
          existing_node.secondary_ip == secondary_ip):
2269
        raise errors.OpPrereqError("New node ip address(es) conflict with"
2270
                                   " existing node %s" % existing_node.name)
2271

    
2272
    # check that the type of the node (single versus dual homed) is the
2273
    # same as for the master
2274
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
2275
    master_singlehomed = myself.secondary_ip == myself.primary_ip
2276
    newbie_singlehomed = secondary_ip == primary_ip
2277
    if master_singlehomed != newbie_singlehomed:
2278
      if master_singlehomed:
2279
        raise errors.OpPrereqError("The master has no private ip but the"
2280
                                   " new node has one")
2281
      else:
2282
        raise errors.OpPrereqError("The master has a private ip but the"
2283
                                   " new node doesn't have one")
2284

    
2285
    # check reachability
2286
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
2287
      raise errors.OpPrereqError("Node not reachable by ping")
2288

    
2289
    if not newbie_singlehomed:
2290
      # check reachability from my secondary ip to newbie's secondary ip
2291
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
2292
                           source=myself.secondary_ip):
2293
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
2294
                                   " based ping to noded port")
2295

    
2296
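    # the new node joins the master candidate pool only if the pool is not
    # yet full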
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2297
    mc_now, _ = self.cfg.GetMasterCandidateStats()
2298
    master_candidate = mc_now < cp_size
2299

    
2300
    self.new_node = objects.Node(name=node,
2301
                                 primary_ip=primary_ip,
2302
                                 secondary_ip=secondary_ip,
2303
                                 master_candidate=master_candidate,
2304
                                 offline=False, drained=False)
2305

    
2306
  def Exec(self, feedback_fn):
2307
    """Adds the new node to the cluster.
2308

2309
    """
2310
    new_node = self.new_node
2311
    node = new_node.name
2312

    
2313
    # check connectivity
2314
    result = self.rpc.call_version([node])[node]
2315
    result.Raise("Can't get version information from node %s" % node)
2316
    if constants.PROTOCOL_VERSION == result.payload:
2317
      logging.info("Communication to node %s fine, sw version %s match",
2318
                   node, result.payload)
2319
    else:
2320
      raise errors.OpExecError("Version mismatch master version %s,"
2321
                               " node version %s" %
2322
                               (constants.PROTOCOL_VERSION, result.payload))
2323

    
2324
    # setup ssh on node
2325
    logging.info("Copy ssh key to node %s", node)
2326
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
2327
    keyarray = []
2328
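    # note: the order of these files must match the positional arguments
    # passed to call_node_add below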
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
2329
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
2330
                priv_key, pub_key]
2331

    
2332
    for i in keyfiles:
2333
      f = open(i, 'r')
2334
      try:
2335
        keyarray.append(f.read())
2336
      finally:
2337
        f.close()
2338

    
2339
    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
2340
                                    keyarray[2],
2341
                                    keyarray[3], keyarray[4], keyarray[5])
2342
    result.Raise("Cannot transfer ssh keys to the new node")
2343

    
2344
    # Add node to our /etc/hosts, and add key to known_hosts
2345
    if self.cfg.GetClusterInfo().modify_etc_hosts:
2346
      utils.AddHostToEtcHosts(new_node.name)
2347

    
2348
    if new_node.secondary_ip != new_node.primary_ip:
2349
      result = self.rpc.call_node_has_ip_address(new_node.name,
2350
                                                 new_node.secondary_ip)
2351
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
2352
                   prereq=True)
2353
      if not result.payload:
2354
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
2355
                                 " you gave (%s). Please fix and re-run this"
2356
                                 " command." % new_node.secondary_ip)
2357

    
2358
    node_verify_list = [self.cfg.GetMasterNode()]
2359
    node_verify_param = {
2360
      'nodelist': [node],
2361
      # TODO: do a node-net-test as well?
2362
    }
2363

    
2364
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
2365
                                       self.cfg.GetClusterName())
2366
    for verifier in node_verify_list:
2367
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
2368
      nl_payload = result[verifier].payload['nodelist']
2369
      if nl_payload:
2370
        for failed in nl_payload:
2371
          feedback_fn("ssh/hostname verification failed %s -> %s" %
2372
                      (verifier, nl_payload[failed]))
2373
        raise errors.OpExecError("ssh/hostname verification failed.")
2374

    
2375
    if self.op.readd:
2376
      _RedistributeAncillaryFiles(self)
2377
      self.context.ReaddNode(new_node)
2378
    else:
2379
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
2380
      self.context.AddNode(new_node)
2381

    
2382

    
2383
class LUSetNodeParams(LogicalUnit):
2384
  """Modifies the parameters of a node.
2385

2386
  """
2387
  HPATH = "node-modify"
2388
  HTYPE = constants.HTYPE_NODE
2389
  _OP_REQP = ["node_name"]
2390
  REQ_BGL = False
2391

    
2392
  def CheckArguments(self):
2393
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
2394
    if node_name is None:
2395
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2396
    self.op.node_name = node_name
2397
    _CheckBooleanOpField(self.op, 'master_candidate')
2398
    _CheckBooleanOpField(self.op, 'offline')
2399
    _CheckBooleanOpField(self.op, 'drained')
2400
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
2401
    if all_mods.count(None) == 3:
2402
      raise errors.OpPrereqError("Please pass at least one modification")
2403
    if all_mods.count(True) > 1:
2404
      raise errors.OpPrereqError("Can't set the node into more than one"
2405
                                 " state at the same time")
2406

    
2407
  def ExpandNames(self):
2408
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
2409

    
2410
  def BuildHooksEnv(self):
2411
    """Build hooks env.
2412

2413
    This runs on the master node.
2414

2415
    """
2416
    env = {
2417
      "OP_TARGET": self.op.node_name,
2418
      "MASTER_CANDIDATE": str(self.op.master_candidate),
2419
      "OFFLINE": str(self.op.offline),
2420
      "DRAINED": str(self.op.drained),
2421
      }
2422
    nl = [self.cfg.GetMasterNode(),
2423
          self.op.node_name]
2424
    return env, nl, nl
2425

    
2426
  def CheckPrereq(self):
2427
    """Check prerequisites.
2428

2429
    This checks that the requested flags are consistent with the node's
    current state and that enough master candidates remain.
2430

2431
    """
2432
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
2433

    
2434
    if ((self.op.master_candidate == False or self.op.offline == True or
2435
         self.op.drained == True) and node.master_candidate):
2436
      # we will demote the node from master_candidate
2437
      if self.op.node_name == self.cfg.GetMasterNode():
2438
        raise errors.OpPrereqError("The master node has to be a"
2439
                                   " master candidate, online and not drained")
2440
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2441
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
2442
      if num_candidates <= cp_size:
2443
        msg = ("Not enough master candidates (desired"
2444
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
2445
        if self.op.force:
2446
          self.LogWarning(msg)
2447
        else:
2448
          raise errors.OpPrereqError(msg)
2449

    
2450
    if (self.op.master_candidate == True and
2451
        ((node.offline and not self.op.offline == False) or
2452
         (node.drained and not self.op.drained == False))):
2453
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
2454
                                 " to master_candidate" % node.name)
2455

    
2456
    return
2457

    
2458
  def Exec(self, feedback_fn):
2459
    """Modifies a node.
2460

2461
    """
2462
    node = self.node
2463

    
2464
    result = []
2465
    changed_mc = False
2466

    
2467
    if self.op.offline is not None:
2468
      node.offline = self.op.offline
2469
      result.append(("offline", str(self.op.offline)))
2470
      if self.op.offline == True:
2471
        if node.master_candidate:
2472
          node.master_candidate = False
2473
          changed_mc = True
2474
          result.append(("master_candidate", "auto-demotion due to offline"))
2475
        if node.drained:
2476
          node.drained = False
2477
          result.append(("drained", "clear drained status due to offline"))
2478

    
2479
    if self.op.master_candidate is not None:
2480
      node.master_candidate = self.op.master_candidate
2481
      changed_mc = True
2482
      result.append(("master_candidate", str(self.op.master_candidate)))
2483
      if self.op.master_candidate == False:
2484
        rrc = self.rpc.call_node_demote_from_mc(node.name)
2485
        msg = rrc.fail_msg
2486
        if msg:
2487
          self.LogWarning("Node failed to demote itself: %s" % msg)
2488

    
2489
    if self.op.drained is not None:
2490
      node.drained = self.op.drained
2491
      result.append(("drained", str(self.op.drained)))
2492
      if self.op.drained == True:
2493
        if node.master_candidate:
2494
          node.master_candidate = False
2495
          changed_mc = True
2496
          result.append(("master_candidate", "auto-demotion due to drain"))
2497
        if node.offline:
2498
          node.offline = False
2499
          result.append(("offline", "clear offline status due to drain"))
2500

    
2501
    # this will trigger configuration file update, if needed
2502
    self.cfg.Update(node)
2503
    # this will trigger job queue propagation or cleanup
2504
    if changed_mc:
2505
      self.context.ReaddNode(node)
2506

    
2507
    return result
2508

    
2509

    
2510
class LUPowercycleNode(NoHooksLU):
2511
  """Powercycles a node.
2512

2513
  """
2514
  _OP_REQP = ["node_name", "force"]
2515
  REQ_BGL = False
2516

    
2517
  def CheckArguments(self):
2518
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
2519
    if node_name is None:
2520
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2521
    self.op.node_name = node_name
2522
    if node_name == self.cfg.GetMasterNode() and not self.op.force:
2523
      raise errors.OpPrereqError("The node is the master and the force"
2524
                                 " parameter was not set")
2525

    
2526
  def ExpandNames(self):
2527
    """Locking for PowercycleNode.
2528

2529
    This is a last-resort option and shouldn't block on other
2530
    jobs. Therefore, we grab no locks.
2531

2532
    """
2533
    self.needed_locks = {}
2534

    
2535
  def CheckPrereq(self):
2536
    """Check prerequisites.
2537

2538
    This LU has no prereqs.
2539

2540
    """
2541
    pass
2542

    
2543
  def Exec(self, feedback_fn):
2544
    """Reboots a node.
2545

2546
    """
2547
    result = self.rpc.call_node_powercycle(self.op.node_name,
2548
                                           self.cfg.GetHypervisorType())
2549
    result.Raise("Failed to schedule the reboot")
2550
    return result.payload
2551

    
2552

    
2553
class LUQueryClusterInfo(NoHooksLU):
2554
  """Query cluster configuration.
2555

2556
  """
2557
  _OP_REQP = []
2558
  REQ_BGL = False
2559

    
2560
  def ExpandNames(self):
2561
    self.needed_locks = {}
2562

    
2563
  def CheckPrereq(self):
2564
    """No prerequsites needed for this LU.
2565

2566
    """
2567
    pass
2568

    
2569
  def Exec(self, feedback_fn):
2570
    """Return cluster config.
2571

2572
    """
2573
    cluster = self.cfg.GetClusterInfo()
2574
    result = {
2575
      "software_version": constants.RELEASE_VERSION,
2576
      "protocol_version": constants.PROTOCOL_VERSION,
2577
      "config_version": constants.CONFIG_VERSION,
2578
      "os_api_version": constants.OS_API_VERSION,
2579
      "export_version": constants.EXPORT_VERSION,
2580
      "architecture": (platform.architecture()[0], platform.machine()),
2581
      "name": cluster.cluster_name,
2582
      "master": cluster.master_node,
2583
      "default_hypervisor": cluster.default_hypervisor,
2584
      "enabled_hypervisors": cluster.enabled_hypervisors,
2585
      "hvparams": dict([(hypervisor, cluster.hvparams[hypervisor])
2586
                        for hypervisor in cluster.enabled_hypervisors]),
2587
      "beparams": cluster.beparams,
2588
      "nicparams": cluster.nicparams,
2589
      "candidate_pool_size": cluster.candidate_pool_size,
2590
      "master_netdev": cluster.master_netdev,
2591
      "volume_group_name": cluster.volume_group_name,
2592
      "file_storage_dir": cluster.file_storage_dir,
2593
      }
2594

    
2595
    return result
2596

    
2597

    
2598
class LUQueryConfigValues(NoHooksLU):
2599
  """Return configuration values.
2600

2601
  """
2602
  _OP_REQP = []
2603
  REQ_BGL = False
2604
  _FIELDS_DYNAMIC = utils.FieldSet()
2605
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")
2606

    
2607
  def ExpandNames(self):
2608
    self.needed_locks = {}
2609

    
2610
    _CheckOutputFields(static=self._FIELDS_STATIC,
2611
                       dynamic=self._FIELDS_DYNAMIC,
2612
                       selected=self.op.output_fields)
2613

    
2614
  def CheckPrereq(self):
2615
    """No prerequisites.
2616

2617
    """
2618
    pass
2619

    
2620
  def Exec(self, feedback_fn):
2621
    """Dump a representation of the cluster config to the standard output.
2622

2623
    """
2624
    values = []
2625
    for field in self.op.output_fields:
2626
      if field == "cluster_name":
2627
        entry = self.cfg.GetClusterName()
2628
      elif field == "master_node":
2629
        entry = self.cfg.GetMasterNode()
2630
      elif field == "drain_flag":
2631
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
2632
      else:
2633
        raise errors.ParameterError(field)
2634
      values.append(entry)
2635
    return values
2636

    
2637

    
2638
class LUActivateInstanceDisks(NoHooksLU):
2639
  """Bring up an instance's disks.
2640

2641
  """
2642
  _OP_REQP = ["instance_name"]
2643
  REQ_BGL = False
2644

    
2645
  def ExpandNames(self):
2646
    self._ExpandAndLockInstance()
2647
    self.needed_locks[locking.LEVEL_NODE] = []
2648
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2649

    
2650
  def DeclareLocks(self, level):
2651
    if level == locking.LEVEL_NODE:
2652
      self._LockInstancesNodes()
2653

    
2654
  def CheckPrereq(self):
2655
    """Check prerequisites.
2656

2657
    This checks that the instance is in the cluster.
2658

2659
    """
2660
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2661
    assert self.instance is not None, \
2662
      "Cannot retrieve locked instance %s" % self.op.instance_name
2663
    _CheckNodeOnline(self, self.instance.primary_node)
2664

    
2665
  def Exec(self, feedback_fn):
2666
    """Activate the disks.
2667

2668
    """
2669
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
2670
    if not disks_ok:
2671
      raise errors.OpExecError("Cannot activate block devices")
2672

    
2673
    return disks_info
2674

    
2675

    
2676
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
2677
  """Prepare the block devices for an instance.
2678

2679
  This sets up the block devices on all nodes.
2680

2681
  @type lu: L{LogicalUnit}
2682
  @param lu: the logical unit on whose behalf we execute
2683
  @type instance: L{objects.Instance}
2684
  @param instance: the instance for whose disks we assemble
2685
  @type ignore_secondaries: boolean
2686
  @param ignore_secondaries: if true, errors on secondary nodes
2687
      won't result in an error return from the function
2688
  @return: False if the operation failed, otherwise a list of
2689
      (host, instance_visible_name, node_visible_name)
2690
      with the mapping from node devices to instance devices
2691

2692
  """
2693
  device_info = []
2694
  disks_ok = True
2695
  iname = instance.name
2696
  # With the two-pass mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it
2699

    
2700
  # The proper fix would be to wait (with some limits) until the
2701
  # connection has been made and drbd transitions from WFConnection
2702
  # into any other network-connected state (Connected, SyncTarget,
2703
  # SyncSource, etc.)
2704

    
2705
  # 1st pass, assemble on all nodes in secondary mode
2706
  for inst_disk in instance.disks:
2707
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2708
      lu.cfg.SetDiskID(node_disk, node)
2709
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
2710
      msg = result.fail_msg
2711
      if msg:
2712
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2713
                           " (is_primary=False, pass=1): %s",
2714
                           inst_disk.iv_name, node, msg)
2715
        if not ignore_secondaries:
2716
          disks_ok = False
2717

    
2718
  # FIXME: race condition on drbd migration to primary
2719

    
2720
  # 2nd pass, do only the primary node
2721
  for inst_disk in instance.disks:
2722
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2723
      if node != instance.primary_node:
2724
        continue
2725
      lu.cfg.SetDiskID(node_disk, node)
2726
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
2727
      msg = result.fail_msg
2728
      if msg:
2729
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2730
                           " (is_primary=True, pass=2): %s",
2731
                           inst_disk.iv_name, node, msg)
2732
        disks_ok = False
2733
    device_info.append((instance.primary_node, inst_disk.iv_name,
2734
                        result.payload))
2735

    
2736
  # leave the disks configured for the primary node
2737
  # this is a workaround that would be fixed better by
2738
  # improving the logical/physical id handling
2739
  for disk in instance.disks:
2740
    lu.cfg.SetDiskID(disk, instance.primary_node)
2741

    
2742
  return disks_ok, device_info
2743

    
2744

    
2745
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
                                           ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    instance = self.instance
    _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks whether the instance is running before calling
  _ShutdownInstanceDisks.

  """
  pnode = instance.primary_node
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  ins_l.Raise("Can't contact node %s" % pnode)

  if instance.name in ins_l.payload:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)


def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are
  ignored.

  """
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result


def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
  nodeinfo[node].Raise("Can't get data from node %s" % node, prereq=True)
  free_mem = nodeinfo[node].payload.get('memory_free', None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem))


class LUStartupInstance(LogicalUnit):
2870
  """Starts an instance.
2871

2872
  """
2873
  HPATH = "instance-start"
2874
  HTYPE = constants.HTYPE_INSTANCE
2875
  _OP_REQP = ["instance_name", "force"]
2876
  REQ_BGL = False
2877

    
2878
  def ExpandNames(self):
2879
    self._ExpandAndLockInstance()
2880

    
2881
  def BuildHooksEnv(self):
2882
    """Build hooks env.
2883

2884
    This runs on master, primary and secondary nodes of the instance.
2885

2886
    """
2887
    env = {
2888
      "FORCE": self.op.force,
2889
      }
2890
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2891
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2892
    return env, nl, nl
2893

    
2894
  def CheckPrereq(self):
2895
    """Check prerequisites.
2896

2897
    This checks that the instance is in the cluster.
2898

2899
    """
2900
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2901
    assert self.instance is not None, \
2902
      "Cannot retrieve locked instance %s" % self.op.instance_name
2903

    
2904
    # extra beparams
2905
    self.beparams = getattr(self.op, "beparams", {})
2906
    if self.beparams:
2907
      if not isinstance(self.beparams, dict):
2908
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
2909
                                   " dict" % (type(self.beparams), ))
2910
      # fill the beparams dict
2911
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
2912
      self.op.beparams = self.beparams
2913

    
2914
    # extra hvparams
2915
    self.hvparams = getattr(self.op, "hvparams", {})
2916
    if self.hvparams:
2917
      if not isinstance(self.hvparams, dict):
2918
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
2919
                                   " dict" % (type(self.hvparams), ))
2920

    
2921
      # check hypervisor parameter syntax (locally)
2922
      cluster = self.cfg.GetClusterInfo()
2923
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
2924
      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
2925
                                    instance.hvparams)
2926
      filled_hvp.update(self.hvparams)
2927
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
2928
      hv_type.CheckParameterSyntax(filled_hvp)
2929
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
2930
      self.op.hvparams = self.hvparams
2931

    
2932
    _CheckNodeOnline(self, instance.primary_node)
2933

    
2934
    bep = self.cfg.GetClusterInfo().FillBE(instance)
2935
    # check bridges existance
2936
    _CheckInstanceBridgesExist(self, instance)
2937

    
2938
    remote_info = self.rpc.call_instance_info(instance.primary_node,
2939
                                              instance.name,
2940
                                              instance.hypervisor)
2941
    remote_info.Raise("Error checking node %s" % instance.primary_node,
2942
                      prereq=True)
2943
    if not remote_info.payload: # not running already
2944
      _CheckNodeFreeMemory(self, instance.primary_node,
2945
                           "starting instance %s" % instance.name,
2946
                           bep[constants.BE_MEMORY], instance.hypervisor)
2947

    
2948
  def Exec(self, feedback_fn):
2949
    """Start the instance.
2950

2951
    """
2952
    instance = self.instance
2953
    force = self.op.force
2954

    
2955
    self.cfg.MarkInstanceUp(instance.name)
2956

    
2957
    node_current = instance.primary_node
2958

    
2959
    _StartInstanceDisks(self, instance, force)
2960

    
2961
    result = self.rpc.call_instance_start(node_current, instance,
2962
                                          self.hvparams, self.beparams)
2963
    msg = result.fail_msg
2964
    if msg:
2965
      _ShutdownInstanceDisks(self, instance)
2966
      raise errors.OpExecError("Could not start instance: %s" % msg)
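  # Illustrative note (not in the original class): the optional hvparams and
  # beparams accepted here are start-time overrides layered on top of the
  # cluster and instance values via objects.FillDict; a caller could, for
  # example, pass something like beparams={constants.BE_MEMORY: 512} to start
  # the instance once with 512 MiB without touching its stored configuration.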


class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type)
      result.Raise("Could not reboot instance")
    else:
      result = self.rpc.call_instance_shutdown(node_current, instance)
      result.Raise("Could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)


class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    self.cfg.MarkInstanceDown(instance.name)
    result = self.rpc.call_instance_shutdown(node_current, instance)
    msg = result.fail_msg
    if msg:
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)

    _ShutdownInstanceDisks(self, instance)


class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node,
                      prereq=True)
    if remote_info.payload:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise("OS '%s' not in supported OS list for primary node %s" %
                   (self.op.os_type, pnode.name), prereq=True)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
      result.Raise("Could not install OS for instance %s on node %s" %
                   (inst.name, inst.primary_node))
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node,
                      prereq=True)
    if remote_info.payload:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    logging.info("Removing block devices for instance %s", instance.name)

    if not _RemoveDisks(self, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", instance.name)

    self.cfg.RemoveInstance(instance.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name


class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state",
                                    "disk_template", "ip", "mac", "bridge",
                                    "nic_mode", "nic_link",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    r"(disk)\.(size)/([0-9]+)",
                                    r"(disk)\.(sizes)", "disk_usage",
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
                                    r"(nic)\.(bridge)/([0-9]+)",
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
                                    r"(disk|nic)\.(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
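  # Illustrative note (not part of the original class): the regex-style
  # entries above mean that, for example, "disk.size/0" selects the size of
  # the first disk, "nic.macs" the list of all NIC MAC addresses and
  # "nic.count" the number of NICs; Exec() below dispatches on the matched
  # regex groups.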


  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of instances and their attributes.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # caller didn't specify instance names, so ordering is not important
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # caller did specify names, so we must keep the ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    off_nodes = []
    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.failed or result.fail_msg:
          bad_nodes.append(name)
        else:
          if result.payload:
            live_data.update(result.payload)
          # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    cluster = self.cfg.GetClusterInfo()
    for instance in instance_list:
      iout = []
      i_hv = cluster.FillHV(instance)
      i_be = cluster.FillBE(instance)
      i_nicp = [objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
                                 nic.nicparams) for nic in instance.nics]
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          if instance.nics:
            val = instance.nics[0].ip
          else:
            val = None
        elif field == "nic_mode":
          if instance.nics:
            val = i_nicp[0][constants.NIC_MODE]
          else:
            val = None
        elif field == "nic_link":
          if instance.nics:
            val = i_nicp[0][constants.NIC_LINK]
          else:
            val = None
        elif field == "bridge":
          if (instance.nics and
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
            val = i_nicp[0][constants.NIC_LINK]
          else:
            val = None
        elif field == "mac":
          if instance.nics:
            val = instance.nics[0].mac
          else:
            val = None
        elif field == "sda_size" or field == "sdb_size":
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "disk_usage": # total disk usage per node
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "modes":
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
            elif st_groups[1] == "links":
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
            elif st_groups[1] == "bridges":
              val = []
              for nicp in i_nicp:
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
                  val.append(nicp[constants.NIC_LINK])
                else:
                  val.append(None)
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "mode":
                  val = i_nicp[nic_idx][constants.NIC_MODE]
                elif st_groups[1] == "link":
                  val = i_nicp[nic_idx][constants.NIC_LINK]
                elif st_groups[1] == "bridge":
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
                  if nic_mode == constants.NIC_MODE_BRIDGED:
                    val = i_nicp[nic_idx][constants.NIC_LINK]
                  else:
                    val = None
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, "Unhandled variable parameter"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
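    # Illustrative example (not part of the original code): querying with
    # output_fields=["name", "status", "disk.sizes"] yields one row per
    # instance, e.g. ["inst1.example.com", "running", [10240, 4096]].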


class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUMigrateInstance(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "live", "cleanup"]

  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["MIGRATE_LIVE"] = self.op.live
    env["MIGRATE_CLEANUP"] = self.op.cleanup
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ConfigurationError("No secondary node but using"
                                      " drbd8 disk template")

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

    if not self.op.cleanup:
      _CheckNodeNotDrained(self, target_node)
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      result.Raise("Can't migrate, please use failover", prereq=True)

    self.instance = instance

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        nres.Raise("Cannot resync disks on node %s" % node)
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    result.Raise("Cannot change disk to secondary on node %s" % node)

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      nres.Raise("Cannot disconnect disks node %s" % node)

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      nres.Raise("Cannot change disks config on node %s" % node)

  def _ExecCleanup(self):
    """Try to clean up after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise("Can't contact node %s" % node)

    runningon_source = instance.name in ins_l[source_node].payload
    runningon_target = instance.name in ins_l[target_node].payload

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore errors here, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.LogWarning("Migration failed and I can't reconnect the"
                      " drives: error '%s'\n"
                      "Please look and recover the instance status" %
                      str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.fail_msg
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s" %
                    (target_node, abort_msg))
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.

    The migration is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.fail_msg
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.fail_msg
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.op.live)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s" % msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def Exec(self, feedback_fn):
    """Perform the migration.

    """
    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node
    self.target_node = self.instance.secondary_nodes[0]
    self.all_nodes = [self.source_node, self.target_node]
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }
    if self.op.cleanup:
      return self._ExecCleanup()
    else:
      return self._ExecMigration()
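    # Illustrative note (not in the original code): self.nodes_ip maps each
    # node name to its secondary (replication) IP address; the DRBD
    # disconnect/attach RPCs above use the whole mapping, while the migration
    # itself targets self.nodes_ip[target_node].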


def _CreateBlockDev(lu, node, instance, device, force_create,
                    info, force_open):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device which has the
      CreateOnSecondary() attribute
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  if device.CreateOnSecondary():
    force_create = True

  if device.children:
    for child in device.children:
      _CreateBlockDev(lu, node, instance, child, force_create,
                      info, force_open)

  if not force_create:
    return

  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)


def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created in advance.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  lu.cfg.SetDiskID(device, node)
  result = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, force_open, info)
  result.Raise("Can't create block device %s on"
               " node %s for instance %s" % (device, node, instance.name))
  if device.physical_id is None:
    device.physical_id = result.payload


def _GenerateUniqueNames(lu, exts):
  """Generate a suitable LV name.

  This will generate a logical volume name for the given instance.

  """
  results = []
  for val in exts:
    new_id = lu.cfg.GenerateUniqueID()
    results.append("%s%s" % (new_id, val))
  return results


def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  shared_secret = lu.cfg.GenerateDRBDSecret()
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                          logical_id=(primary, secondary, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name)
  return drbd_dev
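# Illustrative note (not part of the original module): the returned disk is a
# DRBD8 device whose children are the data LV (of the requested size) and a
# fixed 128 MiB metadata LV; its logical_id ties together the two nodes, the
# allocated network port, both DRBD minors and the shared secret.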
4219

    
4220

    
4221
def _GenerateDiskTemplate(lu, template_name,
4222
                          instance_name, primary_node,
4223
                          secondary_nodes, disk_info,
4224
                          file_storage_dir, file_driver,
4225
                          base_index):
4226
  """Generate the entire disk layout for a given template type.
4227

4228
  """
4229
  #TODO: compute space requirements
4230

    
4231
  vgname = lu.cfg.GetVGName()
4232
  disk_count = len(disk_info)
4233
  disks = []
4234
  if template_name == constants.DT_DISKLESS:
4235
    pass
4236
  elif template_name == constants.DT_PLAIN:
4237
    if len(secondary_nodes) != 0:
4238
      raise errors.ProgrammerError("Wrong template configuration")
4239

    
4240
    names = _GenerateUniqueNames(lu, [".disk%d" % i
4241
                                      for i in range(disk_count)])
4242
    for idx, disk in enumerate(disk_info):
4243
      disk_index = idx + base_index
4244
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
4245
                              logical_id=(vgname, names[idx]),
4246
                              iv_name="disk/%d" % disk_index,
4247
                              mode=disk["mode"])
4248
      disks.append(disk_dev)
4249
  elif template_name == constants.DT_DRBD8:
4250
    if len(secondary_nodes) != 1:
4251
      raise errors.ProgrammerError("Wrong template configuration")
4252
    remote_node = secondary_nodes[0]
4253
    minors = lu.cfg.AllocateDRBDMinor(
4254
      [primary_node, remote_node] * len(disk_info), instance_name)
4255

    
4256
    names = []
4257
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
4258
                                               for i in range(disk_count)]):
4259
      names.append(lv_prefix + "_data")
4260
      names.append(lv_prefix + "_meta")
4261
    for idx, disk in enumerate(disk_info):
4262
      disk_index = idx + base_index
4263
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
4264
                                      disk["size"], names[idx*2:idx*2+2],
4265
                                      "disk/%d" % disk_index,
4266
                                      minors[idx*2], minors[idx*2+1])
4267
      disk_dev.mode = disk["mode"]
4268
      disks.append(disk_dev)
4269
  elif template_name == constants.DT_FILE:
4270
    if len(secondary_nodes) != 0:
4271
      raise errors.ProgrammerError("Wrong template configuration")
4272

    
4273
    for idx, disk in enumerate(disk_info):
4274
      disk_index = idx + base_index
4275
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
4276
                              iv_name="disk/%d" % disk_index,
4277
                              logical_id=(file_driver,
4278
                                          "%s/disk%d" % (file_storage_dir,
4279
                                                         disk_index)),
4280
                              mode=disk["mode"])
4281
      disks.append(disk_dev)
4282
  else:
4283
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
4284
  return disks
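
# Illustrative sketch (not used by any LU): how the DT_DRBD8 branch above
# pairs the generated LV names and DRBD minors per disk index. The two input
# lists are hypothetical stand-ins for what _GenerateUniqueNames and
# AllocateDRBDMinor return for a two-disk instance.
def _ExampleDrbd8NameMinorPairing():
  """Return the (lv_names, p_minor, s_minor) tuples for two sample disks.

  """
  names = ["uuid0.disk0_data", "uuid0.disk0_meta",
           "uuid1.disk1_data", "uuid1.disk1_meta"]
  minors = [0, 0, 1, 1]  # flattened [p_minor, s_minor] pairs, one per disk
  pairs = []
  for idx in range(2):
    lv_names = names[idx * 2:idx * 2 + 2]  # (data, meta) names for this disk
    p_minor, s_minor = minors[idx * 2], minors[idx * 2 + 1]
    pairs.append((lv_names, p_minor, s_minor))
  return pairs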
4285

    
4286

    
4287
def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.
4289

4290
  """
4291
  return "originstname+%s" % instance.name
4292

    
4293

    
4294
def _CreateDisks(lu, instance):
4295
  """Create all disks for an instance.
4296

4297
  This abstracts away some work from AddInstance.
4298

4299
  @type lu: L{LogicalUnit}
4300
  @param lu: the logical unit on whose behalf we execute
4301
  @type instance: L{objects.Instance}
4302
  @param instance: the instance whose disks we should create
4303
  @raise errors.OpExecError: if the creation of any of the disks fails
4305

4306
  """
4307
  info = _GetInstanceInfoText(instance)
4308
  pnode = instance.primary_node
4309

    
4310
  if instance.disk_template == constants.DT_FILE:
4311
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
4312
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
4313

    
4314
    result.Raise("Failed to create directory '%s' on"
                 " node %s" % (file_storage_dir, pnode))
4316

    
4317
  # Note: this needs to be kept in sync with adding of disks in
4318
  # LUSetInstanceParams
4319
  for device in instance.disks:
4320
    logging.info("Creating volume %s for instance %s",
4321
                 device.iv_name, instance.name)
4322
    #HARDCODE
4323
    for node in instance.all_nodes:
4324
      f_create = node == pnode
4325
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
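
# Example of the flags above, with hypothetical node names: for an instance
# whose primary node is node1.example.com and whose secondary is
# node2.example.com, the inner loop issues
#   _CreateBlockDev(lu, "node1.example.com", instance, device, True, info,
#                   True)
#   _CreateBlockDev(lu, "node2.example.com", instance, device, False, info,
#                   False)
# i.e. both boolean arguments (force-create and force-open in this reading)
# are only set on the primary node.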
4326

    
4327

    
4328
def _RemoveDisks(lu, instance):
4329
  """Remove all disks for an instance.
4330

4331
  This abstracts away some work from `AddInstance()` and
4332
  `RemoveInstance()`. Note that in case some of the devices couldn't
4333
  be removed, the removal will continue with the other ones (compare
4334
  with `_CreateDisks()`).
4335

4336
  @type lu: L{LogicalUnit}
4337
  @param lu: the logical unit on whose behalf we execute
4338
  @type instance: L{objects.Instance}
4339
  @param instance: the instance whose disks we should remove
4340
  @rtype: boolean
4341
  @return: the success of the removal
4342

4343
  """
4344
  logging.info("Removing block devices for instance %s", instance.name)
4345

    
4346
  all_result = True
4347
  for device in instance.disks:
4348
    for node, disk in device.ComputeNodeTree(instance.primary_node):
4349
      lu.cfg.SetDiskID(disk, node)
4350
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
4351
      if msg:
4352
        lu.LogWarning("Could not remove block device %s on node %s,"
4353
                      " continuing anyway: %s", device.iv_name, node, msg)
4354
        all_result = False
4355

    
4356
  if instance.disk_template == constants.DT_FILE:
4357
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
4358
    result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
4359
                                                 file_storage_dir)
4360
    msg = result.fail_msg
4361
    if msg:
4362
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
4363
                    file_storage_dir, instance.primary_node, msg)
4364
      all_result = False
4365

    
4366
  return all_result
4367

    
4368

    
4369
def _ComputeDiskSize(disk_template, disks):
4370
  """Compute disk size requirements in the volume group
4371

4372
  """
4373
  # Required free disk space as a function of the disk template and sizes
4374
  req_size_dict = {
4375
    constants.DT_DISKLESS: None,
4376
    constants.DT_PLAIN: sum(d["size"] for d in disks),
4377
    # 128 MB are added for drbd metadata for each disk
4378
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
4379
    constants.DT_FILE: None,
4380
  }
4381

    
4382
  if disk_template not in req_size_dict:
4383
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)
4385

    
4386
  return req_size_dict[disk_template]
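
# Worked example (documentation aid, not used by any LU): for two disks of
# 1024 MB and 2048 MB, DT_PLAIN needs 3072 MB of free space in the volume
# group, DT_DRBD8 needs 3328 MB because of the 128 MB of DRBD metadata per
# disk, and DT_DISKLESS/DT_FILE need no volume group space at all.
def _ExampleComputeDiskSize():
  """Return the DRBD8 requirement for two hypothetical disks (3328 MB).

  """
  return _ComputeDiskSize(constants.DT_DRBD8, [{"size": 1024},
                                               {"size": 2048}])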
4387

    
4388

    
4389
def _CheckHVParams(lu, nodenames, hvname, hvparams):
4390
  """Hypervisor parameter validation.
4391

4392
  This function abstracts the hypervisor parameter validation to be
4393
  used in both instance create and instance modify.
4394

4395
  @type lu: L{LogicalUnit}
4396
  @param lu: the logical unit for which we check
4397
  @type nodenames: list
4398
  @param nodenames: the list of nodes on which we should check
4399
  @type hvname: string
4400
  @param hvname: the name of the hypervisor we should use
4401
  @type hvparams: dict
4402
  @param hvparams: the parameters which we need to check
4403
  @raise errors.OpPrereqError: if the parameters are not valid
4404

4405
  """
4406
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
4407
                                                  hvname,
4408
                                                  hvparams)
4409
  for node in nodenames:
4410
    info = hvinfo[node]
4411
    if info.offline:
4412
      continue
4413
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
4414

    
4415

    
4416
class LUCreateInstance(LogicalUnit):
4417
  """Create an instance.
4418

4419
  """
4420
  HPATH = "instance-add"
4421
  HTYPE = constants.HTYPE_INSTANCE
4422
  _OP_REQP = ["instance_name", "disks", "disk_template",
4423
              "mode", "start",
4424
              "wait_for_sync", "ip_check", "nics",
4425
              "hvparams", "beparams"]
4426
  REQ_BGL = False
4427

    
4428
  def _ExpandNode(self, node):
4429
    """Expands and checks one node name.
4430

4431
    """
4432
    node_full = self.cfg.ExpandNodeName(node)
4433
    if node_full is None:
4434
      raise errors.OpPrereqError("Unknown node %s" % node)
4435
    return node_full
4436

    
4437
  def ExpandNames(self):
4438
    """ExpandNames for CreateInstance.
4439

4440
    Figure out the right locks for instance creation.
4441

4442
    """
4443
    self.needed_locks = {}
4444

    
4445
    # set optional parameters to none if they don't exist
4446
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
4447
      if not hasattr(self.op, attr):
4448
        setattr(self.op, attr, None)
4449

    
4450
    # cheap checks, mostly valid constants given
4451

    
4452
    # verify creation mode
4453
    if self.op.mode not in (constants.INSTANCE_CREATE,
4454
                            constants.INSTANCE_IMPORT):
4455
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
4456
                                 self.op.mode)
4457

    
4458
    # disk template and mirror node verification
4459
    if self.op.disk_template not in constants.DISK_TEMPLATES:
4460
      raise errors.OpPrereqError("Invalid disk template name")
4461

    
4462
    if self.op.hypervisor is None:
4463
      self.op.hypervisor = self.cfg.GetHypervisorType()
4464

    
4465
    cluster = self.cfg.GetClusterInfo()
4466
    enabled_hvs = cluster.enabled_hypervisors
4467
    if self.op.hypervisor not in enabled_hvs:
4468
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
4469
                                 " cluster (%s)" % (self.op.hypervisor,
4470
                                  ",".join(enabled_hvs)))
4471

    
4472
    # check hypervisor parameter syntax (locally)
4473
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
4474
    filled_hvp = objects.FillDict(cluster.hvparams[self.op.hypervisor],
4475
                                  self.op.hvparams)
4476
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
4477
    hv_type.CheckParameterSyntax(filled_hvp)
4478
    self.hv_full = filled_hvp
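
    # FillDict example (hypothetical values): with cluster-level defaults
    # {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/sda1"} and
    # self.op.hvparams == {"root_path": "/dev/xvda1"}, filled_hvp becomes
    # {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/xvda1"}: per-opcode
    # values override the cluster defaults, everything else is inherited.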
4479

    
4480
    # fill and remember the beparams dict
4481
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
4482
    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
4483
                                    self.op.beparams)
4484

    
4485
    #### instance parameters check
4486

    
4487
    # instance name verification
4488
    hostname1 = utils.HostInfo(self.op.instance_name)
4489
    self.op.instance_name = instance_name = hostname1.name
4490

    
4491
    # this is just a preventive check, but someone might still add this
4492
    # instance in the meantime, and creation will fail at lock-add time
4493
    if instance_name in self.cfg.GetInstanceList():
4494
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
4495
                                 instance_name)
4496

    
4497
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
4498

    
4499
    # NIC buildup
4500
    self.nics = []
4501
    for idx, nic in enumerate(self.op.nics):
4502
      nic_mode_req = nic.get("mode", None)
4503
      nic_mode = nic_mode_req
4504
      if nic_mode is None:
4505
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
4506

    
4507
      # in routed mode, for the first nic, the default ip is 'auto'
4508
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
4509
        default_ip_mode = constants.VALUE_AUTO
4510
      else:
4511
        default_ip_mode = constants.VALUE_NONE
4512

    
4513
      # ip validity checks
4514
      ip = nic.get("ip", default_ip_mode)
4515
      if ip is None or ip.lower() == constants.VALUE_NONE:
4516
        nic_ip = None
4517
      elif ip.lower() == constants.VALUE_AUTO:
4518
        nic_ip = hostname1.ip
4519
      else:
4520
        if not utils.IsValidIP(ip):
4521
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
4522
                                     " like a valid IP" % ip)
4523
        nic_ip = ip
4524

    
4525
      # TODO: check the ip for uniqueness !!
4526
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
4527
        raise errors.OpPrereqError("Routed nic mode requires an ip address")
4528

    
4529
      # MAC address verification
4530
      mac = nic.get("mac", constants.VALUE_AUTO)
4531
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4532
        if not utils.IsValidMac(mac.lower()):
4533
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
4534
                                     mac)
4535
      # bridge verification
4536
      bridge = nic.get("bridge", None)
4537
      link = nic.get("link", None)
4538
      if bridge and link:
4539
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
                                   " at the same time")
4540
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
4541
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic")
4542
      elif bridge:
4543
        link = bridge
4544

    
4545
      nicparams = {}
4546
      if nic_mode_req:
4547
        nicparams[constants.NIC_MODE] = nic_mode_req
4548
      if link:
4549
        nicparams[constants.NIC_LINK] = link
4550

    
4551
      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
4552
                                      nicparams)
4553
      objects.NIC.CheckParameterSyntax(check_params)
4554
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
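
    # Example (hypothetical values): a nic specification such as
    # {"mode": constants.NIC_MODE_BRIDGED, "link": "xen-br0", "mac": "auto"}
    # passes the checks above and becomes objects.NIC(mac="auto", ip=None,
    # nicparams={constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
    #            constants.NIC_LINK: "xen-br0"}); a legacy
    # {"bridge": "xen-br0"} specification is folded into the same link value.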
4555

    
4556
    # disk checks/pre-build
4557
    self.disks = []
4558
    for disk in self.op.disks:
4559
      mode = disk.get("mode", constants.DISK_RDWR)
4560
      if mode not in constants.DISK_ACCESS_SET:
4561
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
4562
                                   mode)
4563
      size = disk.get("size", None)
4564
      if size is None:
4565
        raise errors.OpPrereqError("Missing disk size")
4566
      try:
4567
        size = int(size)
4568
      except ValueError:
4569
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
4570
      self.disks.append({"size": size, "mode": mode})
4571

    
4572
    # used in CheckPrereq for ip ping check
4573
    self.check_ip = hostname1.ip
4574

    
4575
    # file storage checks
4576
    if (self.op.file_driver and
4577
        not self.op.file_driver in constants.FILE_DRIVER):
4578
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
4579
                                 self.op.file_driver)
4580

    
4581
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
4582
      raise errors.OpPrereqError("File storage directory path not absolute")
4583

    
4584
    ### Node/iallocator related checks
4585
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
4586
      raise errors.OpPrereqError("One and only one of iallocator and primary"
4587
                                 " node must be given")
4588

    
4589
    if self.op.iallocator:
4590
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4591
    else:
4592
      self.op.pnode = self._ExpandNode(self.op.pnode)
4593
      nodelist = [self.op.pnode]
4594
      if self.op.snode is not None:
4595
        self.op.snode = self._ExpandNode(self.op.snode)
4596
        nodelist.append(self.op.snode)
4597
      self.needed_locks[locking.LEVEL_NODE] = nodelist
4598

    
4599
    # in case of import lock the source node too
4600
    if self.op.mode == constants.INSTANCE_IMPORT:
4601
      src_node = getattr(self.op, "src_node", None)
4602
      src_path = getattr(self.op, "src_path", None)
4603

    
4604
      if src_path is None:
4605
        self.op.src_path = src_path = self.op.instance_name
4606

    
4607
      if src_node is None:
4608
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4609
        self.op.src_node = None
4610
        if os.path.isabs(src_path):
4611
          raise errors.OpPrereqError("Importing an instance from an absolute"
4612
                                     " path requires a source node option.")
4613
      else:
4614
        self.op.src_node = src_node = self._ExpandNode(src_node)
4615
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
4616
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
4617
        if not os.path.isabs(src_path):
4618
          self.op.src_path = src_path = \
4619
            os.path.join(constants.EXPORT_DIR, src_path)
4620

    
4621
    else: # INSTANCE_CREATE
4622
      if getattr(self.op, "os_type", None) is None:
4623
        raise errors.OpPrereqError("No guest OS specified")
4624

    
4625
  def _RunAllocator(self):
4626
    """Run the allocator based on input opcode.
4627

4628
    """
4629
    nics = [n.ToDict() for n in self.nics]
4630
    ial = IAllocator(self,
4631
                     mode=constants.IALLOCATOR_MODE_ALLOC,
4632
                     name=self.op.instance_name,
4633
                     disk_template=self.op.disk_template,
4634
                     tags=[],
4635
                     os=self.op.os_type,
4636
                     vcpus=self.be_full[constants.BE_VCPUS],
4637
                     mem_size=self.be_full[constants.BE_MEMORY],
4638
                     disks=self.disks,
4639
                     nics=nics,
4640
                     hypervisor=self.op.hypervisor,
4641
                     )
4642

    
4643
    ial.Run(self.op.iallocator)
4644

    
4645
    if not ial.success:
4646
      raise errors.OpPrereqError("Can't compute nodes using"
4647
                                 " iallocator '%s': %s" % (self.op.iallocator,
4648
                                                           ial.info))
4649
    if len(ial.nodes) != ial.required_nodes:
4650
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
4651
                                 " of nodes (%s), required %s" %
4652
                                 (self.op.iallocator, len(ial.nodes),
4653
                                  ial.required_nodes))
4654
    self.op.pnode = ial.nodes[0]
4655
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
4656
                 self.op.instance_name, self.op.iallocator,
4657
                 ", ".join(ial.nodes))
4658
    if ial.required_nodes == 2:
4659
      self.op.snode = ial.nodes[1]
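
    # Example result (hypothetical): for a DRBD8 instance the allocator has
    # to return two nodes, e.g. ial.nodes == ["node2.example.com",
    # "node4.example.com"], which become pnode and snode respectively; for
    # non-mirrored disk templates required_nodes is 1 and only pnode is set.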
4660

    
4661
  def BuildHooksEnv(self):
4662
    """Build hooks env.
4663

4664
    This runs on master, primary and secondary nodes of the instance.
4665

4666
    """
4667
    env = {
4668
      "ADD_MODE": self.op.mode,
4669
      }
4670
    if self.op.mode == constants.INSTANCE_IMPORT:
4671
      env["SRC_NODE"] = self.op.src_node
4672
      env["SRC_PATH"] = self.op.src_path
4673
      env["SRC_IMAGES"] = self.src_images
4674

    
4675
    env.update(_BuildInstanceHookEnv(
4676
      name=self.op.instance_name,
4677
      primary_node=self.op.pnode,
4678
      secondary_nodes=self.secondaries,
4679
      status=self.op.start,
4680
      os_type=self.op.os_type,
4681
      memory=self.be_full[constants.BE_MEMORY],
4682
      vcpus=self.be_full[constants.BE_VCPUS],
4683
      nics=_NICListToTuple(self, self.nics),
4684
      disk_template=self.op.disk_template,
4685
      disks=[(d["size"], d["mode"]) for d in self.disks],
4686
      bep=self.be_full,
4687
      hvp=self.hv_full,
4688
      hypervisor=self.op.hypervisor,
4689
    ))
4690

    
4691
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
4692
          self.secondaries)
4693
    return env, nl, nl
4694

    
4695

    
4696
  def CheckPrereq(self):
4697
    """Check prerequisites.
4698

4699
    """
4700
    if (not self.cfg.GetVGName() and
4701
        self.op.disk_template not in constants.DTS_NOT_LVM):
4702
      raise errors.OpPrereqError("Cluster does not support lvm-based"
4703
                                 " instances")
4704

    
4705
    if self.op.mode == constants.INSTANCE_IMPORT:
4706
      src_node = self.op.src_node
4707
      src_path = self.op.src_path
4708

    
4709
      if src_node is None:
4710
        locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
4711
        exp_list = self.rpc.call_export_list(locked_nodes)
4712
        found = False
4713
        for node in exp_list:
4714
          if exp_list[node].fail_msg:
4715
            continue
4716
          if src_path in exp_list[node].payload:
4717
            found = True
4718
            self.op.src_node = src_node = node
4719
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
4720
                                                       src_path)
4721
            break
4722
        if not found:
4723
          raise errors.OpPrereqError("No export found for relative path %s" %
4724
                                      src_path)
4725

    
4726
      _CheckNodeOnline(self, src_node)
4727
      result = self.rpc.call_export_info(src_node, src_path)
4728
      result.Raise("No export or invalid export found in dir %s" % src_path)
4729

    
4730
      export_info = objects.SerializableConfigParser.Loads(str(result.payload))
4731
      if not export_info.has_section(constants.INISECT_EXP):
4732
        raise errors.ProgrammerError("Corrupted export config")
4733

    
4734
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
4735
      if (int(ei_version) != constants.EXPORT_VERSION):
4736
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
4737
                                   (ei_version, constants.EXPORT_VERSION))
4738

    
4739
      # Check that the new instance doesn't have less disks than the export
4740
      instance_disks = len(self.disks)
4741
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
4742
      if instance_disks < export_disks:
4743
        raise errors.OpPrereqError("Not enough disks to import."
4744
                                   " (instance: %d, export: %d)" %
4745
                                   (instance_disks, export_disks))
4746

    
4747
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
4748
      disk_images = []
4749
      for idx in range(export_disks):
4750
        option = 'disk%d_dump' % idx
4751
        if export_info.has_option(constants.INISECT_INS, option):
4752
          # FIXME: are the old os-es, disk sizes, etc. useful?
4753
          export_name = export_info.get(constants.INISECT_INS, option)
4754
          image = os.path.join(src_path, export_name)
4755
          disk_images.append(image)
4756
        else:
4757
          disk_images.append(False)
4758

    
4759
      self.src_images = disk_images
4760

    
4761
      old_name = export_info.get(constants.INISECT_INS, 'name')
4762
      # FIXME: int() here could throw a ValueError on broken exports
4763
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
4764
      if self.op.instance_name == old_name:
4765
        for idx, nic in enumerate(self.nics):
4766
          if nic.mac == constants.VALUE_AUTO and exp_nic_count > idx:
4767
            nic_mac_ini = 'nic%d_mac' % idx
4768
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
4769

    
4770
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
4771
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
4772
    if self.op.start and not self.op.ip_check:
4773
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
4774
                                 " adding an instance in start mode")
4775

    
4776
    if self.op.ip_check:
4777
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
4778
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
4779
                                   (self.check_ip, self.op.instance_name))
4780

    
4781
    #### mac address generation
4782
    # By generating here the mac address both the allocator and the hooks get
4783
    # the real final mac address rather than the 'auto' or 'generate' value.
4784
    # There is a race condition between the generation and the instance object
4785
    # creation, which means that we know the mac is valid now, but we're not
4786
    # sure it will be when we actually add the instance. If things go bad
4787
    # adding the instance will abort because of a duplicate mac, and the
4788
    # creation job will fail.
4789
    for nic in self.nics:
4790
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4791
        nic.mac = self.cfg.GenerateMAC()
4792

    
4793
    #### allocator run
4794

    
4795
    if self.op.iallocator is not None:
4796
      self._RunAllocator()
4797

    
4798
    #### node related checks
4799

    
4800
    # check primary node
4801
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
4802
    assert self.pnode is not None, \
4803
      "Cannot retrieve locked node %s" % self.op.pnode
4804
    if pnode.offline:
4805
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
4806
                                 pnode.name)
4807
    if pnode.drained:
4808
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
4809
                                 pnode.name)
4810

    
4811
    self.secondaries = []
4812

    
4813
    # mirror node verification
4814
    if self.op.disk_template in constants.DTS_NET_MIRROR:
4815
      if self.op.snode is None:
4816
        raise errors.OpPrereqError("The networked disk templates need"
4817
                                   " a mirror node")
4818
      if self.op.snode == pnode.name:
4819
        raise errors.OpPrereqError("The secondary node cannot be"
4820
                                   " the primary node.")
4821
      _CheckNodeOnline(self, self.op.snode)
4822
      _CheckNodeNotDrained(self, self.op.snode)
4823
      self.secondaries.append(self.op.snode)
4824

    
4825
    nodenames = [pnode.name] + self.secondaries
4826

    
4827
    req_size = _ComputeDiskSize(self.op.disk_template,
4828
                                self.disks)
4829

    
4830
    # Check lv size requirements
4831
    if req_size is not None:
4832
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
4833
                                         self.op.hypervisor)
4834
      for node in nodenames:
4835
        info = nodeinfo[node]
4836
        info.Raise("Cannot get current information from node %s" % node)
4837
        info = info.payload
4838
        vg_free = info.get('vg_free', None)
4839
        if not isinstance(vg_free, int):
4840
          raise errors.OpPrereqError("Can't compute free disk space on"
4841
                                     " node %s" % node)
4842
        if req_size > vg_free:
4843
          raise errors.OpPrereqError("Not enough disk space on target node %s."
4844
                                     " %d MB available, %d MB required" %
4845
                                     (node, vg_free, req_size))
4846

    
4847
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
4848

    
4849
    # os verification
4850
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
4851
    result.Raise("OS '%s' not in supported os list for primary node %s" %
4852
                 (self.op.os_type, pnode.name), prereq=True)
4853

    
4854
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
4855

    
4856
    # memory check on primary node
4857
    if self.op.start:
4858
      _CheckNodeFreeMemory(self, self.pnode.name,
4859
                           "creating instance %s" % self.op.instance_name,
4860
                           self.be_full[constants.BE_MEMORY],
4861
                           self.op.hypervisor)
4862

    
4863
  def Exec(self, feedback_fn):
4864
    """Create and add the instance to the cluster.
4865

4866
    """
4867
    instance = self.op.instance_name
4868
    pnode_name = self.pnode.name
4869

    
4870
    ht_kind = self.op.hypervisor
4871
    if ht_kind in constants.HTS_REQ_PORT:
4872
      network_port = self.cfg.AllocatePort()
4873
    else:
4874
      network_port = None
4875

    
4876
    ##if self.op.vnc_bind_address is None:
4877
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
4878

    
4879
    # this is needed because os.path.join does not accept None arguments
4880
    if self.op.file_storage_dir is None:
4881
      string_file_storage_dir = ""
4882
    else:
4883
      string_file_storage_dir = self.op.file_storage_dir
4884

    
4885
    # build the full file storage dir path
4886
    file_storage_dir = os.path.normpath(os.path.join(
4887
                                        self.cfg.GetFileStorageDir(),
4888
                                        string_file_storage_dir, instance))
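
    # Worked example with made-up values: if the cluster file storage dir is
    # /srv/ganeti/file-storage, self.op.file_storage_dir is "web" and the
    # instance is inst1.example.com, the file disks will live under
    # /srv/ganeti/file-storage/web/inst1.example.com; without an explicit
    # directory the "web" component is simply left out.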
4889

    
4890

    
4891
    disks = _GenerateDiskTemplate(self,
4892
                                  self.op.disk_template,
4893
                                  instance, pnode_name,
4894
                                  self.secondaries,
4895
                                  self.disks,
4896
                                  file_storage_dir,
4897
                                  self.op.file_driver,
4898
                                  0)
4899

    
4900
    iobj = objects.Instance(name=instance, os=self.op.os_type,
4901
                            primary_node=pnode_name,
4902
                            nics=self.nics, disks=disks,
4903
                            disk_template=self.op.disk_template,
4904
                            admin_up=False,
4905
                            network_port=network_port,
4906
                            beparams=self.op.beparams,
4907
                            hvparams=self.op.hvparams,
4908
                            hypervisor=self.op.hypervisor,
4909
                            )
4910

    
4911
    feedback_fn("* creating instance disks...")
4912
    try:
4913
      _CreateDisks(self, iobj)
4914
    except errors.OpExecError:
4915
      self.LogWarning("Device creation failed, reverting...")
4916
      try:
4917
        _RemoveDisks(self, iobj)
4918
      finally:
4919
        self.cfg.ReleaseDRBDMinors(instance)
4920
        raise
4921

    
4922
    feedback_fn("adding instance %s to cluster config" % instance)
4923

    
4924
    self.cfg.AddInstance(iobj)
4925
    # Declare that we don't want to remove the instance lock anymore, as we've
4926
    # added the instance to the config
4927
    del self.remove_locks[locking.LEVEL_INSTANCE]
4928
    # Unlock all the nodes
4929
    if self.op.mode == constants.INSTANCE_IMPORT:
4930
      nodes_keep = [self.op.src_node]
4931
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
4932
                       if node != self.op.src_node]
4933
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
4934
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
4935
    else:
4936
      self.context.glm.release(locking.LEVEL_NODE)
4937
      del self.acquired_locks[locking.LEVEL_NODE]
4938

    
4939
    if self.op.wait_for_sync:
4940
      disk_abort = not _WaitForSync(self, iobj)
4941
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
4942
      # make sure the disks are not degraded (still sync-ing is ok)
4943
      time.sleep(15)
4944
      feedback_fn("* checking mirrors status")
4945
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
4946
    else:
4947
      disk_abort = False
4948

    
4949
    if disk_abort:
4950
      _RemoveDisks(self, iobj)
4951
      self.cfg.RemoveInstance(iobj.name)
4952
      # Make sure the instance lock gets removed
4953
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
4954
      raise errors.OpExecError("There are some degraded disks for"
4955
                               " this instance")
4956

    
4957
    feedback_fn("creating os for instance %s on node %s" %
4958
                (instance, pnode_name))
4959

    
4960
    if iobj.disk_template != constants.DT_DISKLESS:
4961
      if self.op.mode == constants.INSTANCE_CREATE:
4962
        feedback_fn("* running the instance OS create scripts...")
4963
        result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
4964
        result.Raise("Could not add os for instance %s"
4965
                     " on node %s" % (instance, pnode_name))
4966

    
4967
      elif self.op.mode == constants.INSTANCE_IMPORT:
4968
        feedback_fn("* running the instance OS import scripts...")
4969
        src_node = self.op.src_node
4970
        src_images = self.src_images
4971
        cluster_name = self.cfg.GetClusterName()
4972
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
4973
                                                         src_node, src_images,
4974
                                                         cluster_name)
4975
        msg = import_result.fail_msg
4976
        if msg:
4977
          self.LogWarning("Error while importing the disk images for instance"
4978
                          " %s on node %s: %s" % (instance, pnode_name, msg))
4979
      else:
4980
        # also checked in the prereq part
4981
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
4982
                                     % self.op.mode)
4983

    
4984
    if self.op.start:
4985
      iobj.admin_up = True
4986
      self.cfg.Update(iobj)
4987
      logging.info("Starting instance %s on node %s", instance, pnode_name)
4988
      feedback_fn("* starting instance...")
4989
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
4990
      result.Raise("Could not start instance")
4991

    
4992

    
4993
class LUConnectConsole(NoHooksLU):
4994
  """Connect to an instance's console.
4995

4996
  This is somewhat special in that it returns the command line that
4997
  you need to run on the master node in order to connect to the
4998
  console.
4999

5000
  """
5001
  _OP_REQP = ["instance_name"]
5002
  REQ_BGL = False
5003

    
5004
  def ExpandNames(self):
5005
    self._ExpandAndLockInstance()
5006

    
5007
  def CheckPrereq(self):
5008
    """Check prerequisites.
5009

5010
    This checks that the instance is in the cluster.
5011

5012
    """
5013
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5014
    assert self.instance is not None, \
5015
      "Cannot retrieve locked instance %s" % self.op.instance_name
5016
    _CheckNodeOnline(self, self.instance.primary_node)
5017

    
5018
  def Exec(self, feedback_fn):
5019
    """Connect to the console of an instance
5020

5021
    """
5022
    instance = self.instance
5023
    node = instance.primary_node
5024

    
5025
    node_insts = self.rpc.call_instance_list([node],
5026
                                             [instance.hypervisor])[node]
5027
    node_insts.Raise("Can't get node information from %s" % node)
5028

    
5029
    if instance.name not in node_insts.payload:
5030
      raise errors.OpExecError("Instance %s is not running." % instance.name)
5031

    
5032
    logging.debug("Connecting to console of %s on %s", instance.name, node)
5033

    
5034
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
5035
    cluster = self.cfg.GetClusterInfo()
5036
    # beparams and hvparams are passed separately, to avoid editing the
5037
    # instance and then saving the defaults in the instance itself.
5038
    hvparams = cluster.FillHV(instance)
5039
    beparams = cluster.FillBE(instance)
5040
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
5041

    
5042
    # build ssh cmdline
5043
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
5044

    
5045

    
5046
class LUReplaceDisks(LogicalUnit):
5047
  """Replace the disks of an instance.
5048

5049
  """
5050
  HPATH = "mirrors-replace"
5051
  HTYPE = constants.HTYPE_INSTANCE
5052
  _OP_REQP = ["instance_name", "mode", "disks"]
5053
  REQ_BGL = False
5054

    
5055
  def CheckArguments(self):
5056
    if not hasattr(self.op, "remote_node"):
5057
      self.op.remote_node = None
5058
    if not hasattr(self.op, "iallocator"):
5059
      self.op.iallocator = None
5060

    
5061
    # check for valid parameter combination
5062
    cnt = [self.op.remote_node, self.op.iallocator].count(None)
5063
    if self.op.mode == constants.REPLACE_DISK_CHG:
5064
      if cnt == 2:
5065
        raise errors.OpPrereqError("When changing the secondary either an"
5066
                                   " iallocator script must be used or the"
5067
                                   " new node given")
5068
      elif cnt == 0:
5069
        raise errors.OpPrereqError("Give either the iallocator or the new"
5070
                                   " secondary, not both")
5071
    else: # not replacing the secondary
5072
      if cnt != 2:
5073
        raise errors.OpPrereqError("The iallocator and new node options can"
5074
                                   " be used only when changing the"
5075
                                   " secondary node")
5076
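
    # Summary of the combinations accepted above (documentation aid):
    #   mode == REPLACE_DISK_CHG: exactly one of remote_node and iallocator
    #                             must be given (cnt == 1)
    #   any other mode:           neither may be given (cnt == 2), as the
    #                             secondary node is not being changed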

    
5077
  def ExpandNames(self):
5078
    self._ExpandAndLockInstance()
5079

    
5080
    if self.op.iallocator is not None:
5081
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5082
    elif self.op.remote_node is not None:
5083
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
5084
      if remote_node is None:
5085
        raise errors.OpPrereqError("Node '%s' not known" %
5086
                                   self.op.remote_node)
5087
      self.op.remote_node = remote_node
5088
      # Warning: do not remove the locking of the new secondary here
5089
      # unless DRBD8.AddChildren is changed to work in parallel;
5090
      # currently it doesn't since parallel invocations of
5091
      # FindUnusedMinor will conflict
5092
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
5093
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5094
    else:
5095
      self.needed_locks[locking.LEVEL_NODE] = []
5096
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5097

    
5098
  def DeclareLocks(self, level):
5099
    # If we're not already locking all nodes in the set we have to declare the
5100
    # instance's primary/secondary nodes.
5101
    if (level == locking.LEVEL_NODE and
5102
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
5103
      self._LockInstancesNodes()
5104

    
5105
  def _RunAllocator(self):
5106
    """Compute a new secondary node using an IAllocator.
5107

5108
    """
5109
    ial = IAllocator(self,
5110
                     mode=constants.IALLOCATOR_MODE_RELOC,
5111
                     name=self.op.instance_name,
5112
                     relocate_from=[self.sec_node])
5113

    
5114
    ial.Run(self.op.iallocator)
5115

    
5116
    if not ial.success:
5117
      raise errors.OpPrereqError("Can't compute nodes using"
5118
                                 " iallocator '%s': %s" % (self.op.iallocator,
5119
                                                           ial.info))
5120
    if len(ial.nodes) != ial.required_nodes:
5121
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
5122
                                 " of nodes (%s), required %s" %
5123
                                 (len(ial.nodes), ial.required_nodes))
5124
    self.op.remote_node = ial.nodes[0]
5125
    self.LogInfo("Selected new secondary for the instance: %s",
5126
                 self.op.remote_node)
5127

    
5128
  def BuildHooksEnv(self):
5129
    """Build hooks env.
5130

5131
    This runs on the master, the primary and all the secondaries.
5132

5133
    """
5134
    env = {
5135
      "MODE": self.op.mode,
5136
      "NEW_SECONDARY": self.op.remote_node,
5137
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
5138
      }
5139
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5140
    nl = [
5141
      self.cfg.GetMasterNode(),
5142
      self.instance.primary_node,
5143
      ]
5144
    if self.op.remote_node is not None:
5145
      nl.append(self.op.remote_node)
5146
    return env, nl, nl
5147

    
5148
  def CheckPrereq(self):
5149
    """Check prerequisites.
5150

5151
    This checks that the instance is in the cluster.
5152

5153
    """
5154
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5155
    assert instance is not None, \
5156
      "Cannot retrieve locked instance %s" % self.op.instance_name
5157
    self.instance = instance
5158

    
5159
    if instance.disk_template != constants.DT_DRBD8:
5160
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
5161
                                 " instances")
5162

    
5163
    if len(instance.secondary_nodes) != 1:
5164
      raise errors.OpPrereqError("The instance has a strange layout,"
5165
                                 " expected one secondary but found %d" %
5166
                                 len(instance.secondary_nodes))
5167

    
5168
    self.sec_node = instance.secondary_nodes[0]
5169

    
5170
    if self.op.iallocator is not None:
5171
      self._RunAllocator()
5172

    
5173
    remote_node = self.op.remote_node
5174
    if remote_node is not None:
5175
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
5176
      assert self.remote_node_info is not None, \
5177
        "Cannot retrieve locked node %s" % remote_node
5178
    else:
5179
      self.remote_node_info = None
5180
    if remote_node == instance.primary_node:
5181
      raise errors.OpPrereqError("The specified node is the primary node of"
5182
                                 " the instance.")
5183
    elif remote_node == self.sec_node:
5184
      raise errors.OpPrereqError("The specified node is already the"
5185
                                 " secondary node of the instance.")
5186

    
5187
    if self.op.mode == constants.REPLACE_DISK_PRI:
5188
      n1 = self.tgt_node = instance.primary_node
5189
      n2 = self.oth_node = self.sec_node
5190
    elif self.op.mode == constants.REPLACE_DISK_SEC:
5191
      n1 = self.tgt_node = self.sec_node
5192
      n2 = self.oth_node = instance.primary_node
5193
    elif self.op.mode == constants.REPLACE_DISK_CHG:
5194
      n1 = self.new_node = remote_node
5195
      n2 = self.oth_node = instance.primary_node
5196
      self.tgt_node = self.sec_node
5197
      _CheckNodeNotDrained(self, remote_node)
5198
    else:
5199
      raise errors.ProgrammerError("Unhandled disk replace mode")
5200

    
5201
    _CheckNodeOnline(self, n1)
5202
    _CheckNodeOnline(self, n2)
5203

    
5204
    if not self.op.disks:
5205
      self.op.disks = range(len(instance.disks))
5206

    
5207
    for disk_idx in self.op.disks:
5208
      instance.FindDisk(disk_idx)
5209

    
5210
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for drbd8.
5212

5213
    The algorithm for replace is quite complicated:
5214

5215
      1. for each disk to be replaced:
5216

5217
        1. create new LVs on the target node with unique names
5218
        1. detach old LVs from the drbd device
5219
        1. rename old LVs to name_replaced.<time_t>
5220
        1. rename new LVs to old LVs
5221
        1. attach the new LVs (with the old names now) to the drbd device
5222

5223
      1. wait for sync across all devices
5224

5225
      1. for each modified disk:
5226

5227
        1. remove old LVs (which have the name name_replaced.<time_t>)
5228

5229
    Failures are not very well handled.
5230

5231
    """
5232
    steps_total = 6
5233
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5234
    instance = self.instance
5235
    iv_names = {}
5236
    vgname = self.cfg.GetVGName()
5237
    # start of work
5238
    cfg = self.cfg
5239
    tgt_node = self.tgt_node
5240
    oth_node = self.oth_node
5241

    
5242
    # Step: check device activation
5243
    self.proc.LogStep(1, steps_total, "check device existence")
5244
    info("checking volume groups")
5245
    my_vg = cfg.GetVGName()
5246
    results = self.rpc.call_vg_list([oth_node, tgt_node])
5247
    if not results:
5248
      raise errors.OpExecError("Can't list volume groups on the nodes")
5249
    for node in oth_node, tgt_node:
5250
      res = results[node]
5251
      res.Raise("Error checking node %s" % node)
5252
      if my_vg not in res.payload:
5253
        raise errors.OpExecError("Volume group '%s' not found on %s" %
5254
                                 (my_vg, node))
5255
    for idx, dev in enumerate(instance.disks):
5256
      if idx not in self.op.disks:
5257
        continue
5258
      for node in tgt_node, oth_node:
5259
        info("checking disk/%d on %s" % (idx, node))
5260
        cfg.SetDiskID(dev, node)
5261
        result = self.rpc.call_blockdev_find(node, dev)
5262
        msg = result.fail_msg
5263
        if not msg and not result.payload:
5264
          msg = "disk not found"
5265
        if msg:
5266
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5267
                                   (idx, node, msg))
5268

    
5269
    # Step: check other node consistency
5270
    self.proc.LogStep(2, steps_total, "check peer consistency")
5271
    for idx, dev in enumerate(instance.disks):
5272
      if idx not in self.op.disks:
5273
        continue
5274
      info("checking disk/%d consistency on %s" % (idx, oth_node))
5275
      if not _CheckDiskConsistency(self, dev, oth_node,
5276
                                   oth_node==instance.primary_node):
5277
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
5278
                                 " to replace disks on this node (%s)" %
5279
                                 (oth_node, tgt_node))
5280

    
5281
    # Step: create new storage
5282
    self.proc.LogStep(3, steps_total, "allocate new storage")
5283
    for idx, dev in enumerate(instance.disks):
5284
      if idx not in self.op.disks:
5285
        continue
5286
      size = dev.size
5287
      cfg.SetDiskID(dev, tgt_node)
5288
      lv_names = [".disk%d_%s" % (idx, suf)
5289
                  for suf in ["data", "meta"]]
5290
      names = _GenerateUniqueNames(self, lv_names)
5291
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
5292
                             logical_id=(vgname, names[0]))
5293
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5294
                             logical_id=(vgname, names[1]))
5295
      new_lvs = [lv_data, lv_meta]
5296
      old_lvs = dev.children
5297
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
5298
      info("creating new local storage on %s for %s" %
5299
           (tgt_node, dev.iv_name))
5300
      # we pass force_create=True to force the LVM creation
5301
      for new_lv in new_lvs:
5302
        _CreateBlockDev(self, tgt_node, instance, new_lv, True,
5303
                        _GetInstanceInfoText(instance), False)
5304

    
5305
    # Step: for each lv, detach+rename*2+attach
5306
    self.proc.LogStep(4, steps_total, "change drbd configuration")
5307
    for dev, old_lvs, new_lvs in iv_names.itervalues():
5308
      info("detaching %s drbd from local storage" % dev.iv_name)
5309
      result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
5310
      result.Raise("Can't detach drbd from local storage on node"
5311
                   " %s for device %s" % (tgt_node, dev.iv_name))
5312
      #dev.children = []
5313
      #cfg.Update(instance)
5314

    
5315
      # ok, we created the new LVs, so now we know we have the needed
5316
      # storage; as such, we proceed on the target node to rename
5317
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
5318
      # using the assumption that logical_id == physical_id (which in
5319
      # turn is the unique_id on that node)
5320

    
5321
      # FIXME(iustin): use a better name for the replaced LVs
5322
      temp_suffix = int(time.time())
5323
      ren_fn = lambda d, suff: (d.physical_id[0],
5324
                                d.physical_id[1] + "_replaced-%s" % suff)
5325
      # build the rename list based on what LVs exist on the node
5326
      rlist = []
5327
      for to_ren in old_lvs:
5328
        result = self.rpc.call_blockdev_find(tgt_node, to_ren)
5329
        if not result.fail_msg and result.payload:
5330
          # device exists
5331
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
5332

    
5333
      info("renaming the old LVs on the target node")
5334
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
5335
      result.Raise("Can't rename old LVs on node %s" % tgt_node)
5336
      # now we rename the new LVs to the old LVs
5337
      info("renaming the new LVs on the target node")
5338
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
5339
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
5340
      result.Raise("Can't rename new LVs on node %s" % tgt_node)
5341

    
5342
      for old, new in zip(old_lvs, new_lvs):
5343
        new.logical_id = old.logical_id
5344
        cfg.SetDiskID(new, tgt_node)
5345

    
5346
      for disk in old_lvs:
5347
        disk.logical_id = ren_fn(disk, temp_suffix)
5348
        cfg.SetDiskID(disk, tgt_node)
5349

    
5350
      # now that the new lvs have the old name, we can add them to the device
5351
      info("adding new mirror component on %s" % tgt_node)
5352
      result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
5353
      msg = result.fail_msg
5354
      if msg:
5355
        for new_lv in new_lvs:
5356
          msg2 = self.rpc.call_blockdev_remove(tgt_node, new_lv).fail_msg
5357
          if msg2:
5358
            warning("Can't rollback device %s: %s", dev, msg2,
5359
                    hint="cleanup manually the unused logical volumes")
5360
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
5361

    
5362
      dev.children = new_lvs
5363
      cfg.Update(instance)
5364

    
5365
    # Step: wait for sync
5366

    
5367
    # this can fail as the old devices are degraded and _WaitForSync
5368
    # does a combined result over all disks, so we don't check its
5369
    # return value
5370
    self.proc.LogStep(5, steps_total, "sync devices")
5371
    _WaitForSync(self, instance, unlock=True)
5372

    
5373
    # so check manually all the devices
5374
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5375
      cfg.SetDiskID(dev, instance.primary_node)
5376
      result = self.rpc.call_blockdev_find(instance.primary_node, dev)
5377
      msg = result.fail_msg
5378
      if not msg and not result.payload:
5379
        msg = "disk not found"
5380
      if msg:
5381
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
5382
                                 (name, msg))
5383
      if result.payload[5]:
5384
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
5385

    
5386
    # Step: remove old storage
5387
    self.proc.LogStep(6, steps_total, "removing old storage")
5388
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5389
      info("remove logical volumes for %s" % name)
5390
      for lv in old_lvs:
5391
        cfg.SetDiskID(lv, tgt_node)
5392
        msg = self.rpc.call_blockdev_remove(tgt_node, lv).fail_msg
5393
        if msg:
5394
          warning("Can't remove old LV: %s" % msg,
5395
                  hint="manually remove unused LVs")
5396
          continue
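
    # Example of the rename done in step 4 above (hypothetical values): an
    # old data LV with physical_id ("xenvg", "uuid0.disk0_data") and
    # temp_suffix 1234567890 ends up as
    # ("xenvg", "uuid0.disk0_data_replaced-1234567890"), while the freshly
    # created LV takes over the original name.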
5397

    
5398
  def _ExecD8Secondary(self, feedback_fn):
5399
    """Replace the secondary node for drbd8.
5400

5401
    The algorithm for replace is quite complicated:
5402
      - for all disks of the instance:
5403
        - create new LVs on the new node with same names
5404
        - shutdown the drbd device on the old secondary
5405
        - disconnect the drbd network on the primary
5406
        - create the drbd device on the new secondary
5407
        - network attach the drbd on the primary, using an artifice:
5408
          the drbd code for Attach() will connect to the network if it
5409
          finds a device which is connected to the good local disks but
5410
          not network enabled
5411
      - wait for sync across all devices
5412
      - remove all disks from the old secondary
5413

5414
    Failures are not very well handled.
5415

5416
    """
5417
    steps_total = 6
5418
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5419
    instance = self.instance
5420
    iv_names = {}
5421
    # start of work
5422
    cfg = self.cfg
5423
    old_node = self.tgt_node
5424
    new_node = self.new_node
5425
    pri_node = instance.primary_node
5426
    nodes_ip = {
5427
      old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
5428
      new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
5429
      pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
5430
      }
5431

    
5432
    # Step: check device activation
5433
    self.proc.LogStep(1, steps_total, "check device existence")
5434
    info("checking volume groups")
5435
    my_vg = cfg.GetVGName()
5436
    results = self.rpc.call_vg_list([pri_node, new_node])
5437
    for node in pri_node, new_node:
5438
      res = results[node]
5439
      res.Raise("Error checking node %s" % node)
5440
      if my_vg not in res.payload:
5441
        raise errors.OpExecError("Volume group '%s' not found on %s" %
5442
                                 (my_vg, node))
5443
    for idx, dev in enumerate(instance.disks):
5444
      if idx not in self.op.disks:
5445
        continue
5446
      info("checking disk/%d on %s" % (idx, pri_node))
5447
      cfg.SetDiskID(dev, pri_node)
5448
      result = self.rpc.call_blockdev_find(pri_node, dev)
5449
      msg = result.fail_msg
5450
      if not msg and not result.payload:
5451
        msg = "disk not found"
5452
      if msg:
5453
        raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5454
                                 (idx, pri_node, msg))
5455

    
5456
    # Step: check other node consistency
5457
    self.proc.LogStep(2, steps_total, "check peer consistency")
5458
    for idx, dev in enumerate(instance.disks):
5459
      if idx not in self.op.disks:
5460
        continue
5461
      info("checking disk/%d consistency on %s" % (idx, pri_node))
5462
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
5463
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
5464
                                 " unsafe to replace the secondary" %
5465
                                 pri_node)
5466

    
5467
    # Step: create new storage
5468
    self.proc.LogStep(3, steps_total, "allocate new storage")
5469
    for idx, dev in enumerate(instance.disks):
5470
      info("adding new local storage on %s for disk/%d" %
5471
           (new_node, idx))
5472
      # we pass force_create=True to force LVM creation
5473
      for new_lv in dev.children:
5474
        _CreateBlockDev(self, new_node, instance, new_lv, True,
5475
                        _GetInstanceInfoText(instance), False)
5476

    
5477
    # Step 4: drbd minors and drbd setup changes
5478
    # after this, we must manually remove the drbd minors on both the
5479
    # error and the success paths
5480
    minors = cfg.AllocateDRBDMinor([new_node for _ in instance.disks],
5481
                                   instance.name)
5482
    logging.debug("Allocated minors %s", minors)
5483
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
5484
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
5485
      size = dev.size
5486
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
5487
      # create new devices on new_node; note that we create two IDs:
5488
      # one without port, so the drbd will be activated without
5489
      # networking information on the new node at this stage, and one
5490
      # with network, for the latter activation in step 4
5491
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
5492
      if pri_node == o_node1:
5493
        p_minor = o_minor1
5494
      else:
5495
        p_minor = o_minor2
5496

    
5497
      new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
5498
      new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)
5499

    
5500
      iv_names[idx] = (dev, dev.children, new_net_id)
5501
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
5502
                    new_net_id)
5503
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
5504
                              logical_id=new_alone_id,
5505
                              children=dev.children,
5506
                              size=dev.size)
5507
      try:
5508
        _CreateSingleBlockDev(self, new_node, instance, new_drbd,
5509
                              _GetInstanceInfoText(instance), False)
5510
      except errors.GenericError:
5511
        self.cfg.ReleaseDRBDMinors(instance.name)
5512
        raise
5513

    
5514
    for idx, dev in enumerate(instance.disks):
5515
      # we have new devices, shutdown the drbd on the old secondary
5516
      info("shutting down drbd for disk/%d on old node" % idx)
5517
      cfg.SetDiskID(dev, old_node)
5518
      msg = self.rpc.call_blockdev_shutdown(old_node, dev).fail_msg
5519
      if msg:
5520
        warning("Failed to shutdown drbd for disk/%d on old node: %s" %
5521
                (idx, msg),
5522
                hint="Please cleanup this device manually as soon as possible")
5523

    
5524
    info("detaching primary drbds from the network (=> standalone)")
5525
    result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
5526
                                               instance.disks)[pri_node]
5527

    
5528
    msg = result.fail_msg
5529
    if msg:
5530
      # detaches didn't succeed (unlikely)
5531
      self.cfg.ReleaseDRBDMinors(instance.name)
5532
      raise errors.OpExecError("Can't detach the disks from the network on"
5533
                               " old node: %s" % (msg,))
5534

    
5535
    # if we managed to detach at least one, we update all the disks of
5536
    # the instance to point to the new secondary
5537
    info("updating instance configuration")
5538
    for dev, _, new_logical_id in iv_names.itervalues():
5539
      dev.logical_id = new_logical_id
5540
      cfg.SetDiskID(dev, pri_node)
5541
    cfg.Update(instance)
5542

    
5543
    # and now perform the drbd attach
5544
    info("attaching primary drbds to new secondary (standalone => connected)")
5545
    result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
5546
                                           instance.disks, instance.name,
5547
                                           False)
5548
    for to_node, to_result in result.items():
5549
      msg = to_result.fail_msg
5550
      if msg:
5551
        warning("can't attach drbd disks on node %s: %s", to_node, msg,
5552
                hint="please do a gnt-instance info to see the"
5553
                " status of disks")
5554

    
5555
    # this can fail as the old devices are degraded and _WaitForSync
5556
    # does a combined result over all disks, so we don't check its
5557
    # return value
5558
    self.proc.LogStep(5, steps_total, "sync devices")
5559
    _WaitForSync(self, instance, unlock=True)
5560

    
5561
    # so check manually all the devices
5562
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
5563
      cfg.SetDiskID(dev, pri_node)
5564
      result = self.rpc.call_blockdev_find(pri_node, dev)
5565
      msg = result.fail_msg
5566
      if not msg and not result.payload:
5567
        msg = "disk not found"
5568
      if msg:
5569
        raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
5570
                                 (idx, msg))
5571
      if result.payload[5]:
5572
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
5573

    
5574
    self.proc.LogStep(6, steps_total, "removing old storage")
5575
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
5576
      info("remove logical volumes for disk/%d" % idx)
5577
      for lv in old_lvs:
5578
        cfg.SetDiskID(lv, old_node)
5579
        msg = self.rpc.call_blockdev_remove(old_node, lv).fail_msg
5580
        if msg:
5581
          warning("Can't remove LV on old secondary: %s", msg,
5582
                  hint="Cleanup stale volumes by hand")
5583

    
5584
  def Exec(self, feedback_fn):
5585
    """Execute disk replacement.
5586

5587
    This dispatches the disk replacement to the appropriate handler.
5588

5589
    """
5590
    instance = self.instance
5591

    
5592
    # Activate the instance disks if we're replacing them on a down instance
5593
    if not instance.admin_up:
5594
      _StartInstanceDisks(self, instance, True)
5595

    
5596
    if self.op.mode == constants.REPLACE_DISK_CHG:
5597
      fn = self._ExecD8Secondary
5598
    else:
5599
      fn = self._ExecD8DiskOnly
5600

    
5601
    ret = fn(feedback_fn)
5602

    
5603
    # Deactivate the instance disks if we're replacing them on a down instance
5604
    if not instance.admin_up:
5605
      _SafeShutdownInstanceDisks(self, instance)
5606

    
5607
    return ret
5608

    
5609

    
5610
class LUGrowDisk(LogicalUnit):
5611
  """Grow a disk of an instance.
5612

5613
  """
5614
  HPATH = "disk-grow"
5615
  HTYPE = constants.HTYPE_INSTANCE
5616
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
5617
  REQ_BGL = False
5618

    
5619
  def ExpandNames(self):
5620
    self._ExpandAndLockInstance()
5621
    self.needed_locks[locking.LEVEL_NODE] = []
5622
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5623

    
5624
  def DeclareLocks(self, level):
5625
    if level == locking.LEVEL_NODE:
5626
      self._LockInstancesNodes()
5627

    
5628
  def BuildHooksEnv(self):
5629
    """Build hooks env.
5630

5631
    This runs on the master, the primary and all the secondaries.
5632

5633
    """
5634
    env = {
5635
      "DISK": self.op.disk,
5636
      "AMOUNT": self.op.amount,
5637
      }
5638
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5639
    nl = [
5640
      self.cfg.GetMasterNode(),
5641
      self.instance.primary_node,
5642
      ]
5643
    return env, nl, nl
5644

    
5645
  def CheckPrereq(self):
5646
    """Check prerequisites.
5647

5648
    This checks that the instance is in the cluster.
5649

5650
    """
5651
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5652
    assert instance is not None, \
5653
      "Cannot retrieve locked instance %s" % self.op.instance_name
5654
    nodenames = list(instance.all_nodes)
5655
    for node in nodenames:
5656
      _CheckNodeOnline(self, node)
5657

    
5658

    
5659
    self.instance = instance
5660

    
5661
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
5662
      raise errors.OpPrereqError("Instance's disk layout does not support"
5663
                                 " growing.")
5664

    
5665
    self.disk = instance.FindDisk(self.op.disk)
5666

    
5667
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
5668
                                       instance.hypervisor)
5669
    for node in nodenames:
5670
      info = nodeinfo[node]
5671
      info.Raise("Cannot get current information from node %s" % node)
5672
      vg_free = info.payload.get('vg_free', None)
5673
      if not isinstance(vg_free, int):
5674
        raise errors.OpPrereqError("Can't compute free disk space on"
5675
                                   " node %s" % node)
5676
      if self.op.amount > vg_free:
5677
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
5678
                                   " %d MiB available, %d MiB required" %
5679
                                   (node, vg_free, self.op.amount))
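      # e.g. a request to grow a disk by 2048 MiB on a node reporting only
      # 1024 MiB of vg_free would be rejected here (illustrative numbers)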
5680

    
5681
  def Exec(self, feedback_fn):
5682
    """Execute disk grow.
5683

5684
    """
5685
    instance = self.instance
5686
    disk = self.disk
5687
    for node in instance.all_nodes:
5688
      self.cfg.SetDiskID(disk, node)
5689
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
5690
      result.Raise("Grow request failed to node %s" % node)
5691
    disk.RecordGrow(self.op.amount)
5692
    self.cfg.Update(instance)
5693
    if self.op.wait_for_sync:
5694
      disk_abort = not _WaitForSync(self, instance)
5695
      if disk_abort:
5696
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
5697
                             " status.\nPlease check the instance.")
5698

    
5699

    
5700
class LUQueryInstanceData(NoHooksLU):
5701
  """Query runtime instance data.
5702

5703
  """
5704
  _OP_REQP = ["instances", "static"]
5705
  REQ_BGL = False
5706

    
5707
  def ExpandNames(self):
5708
    self.needed_locks = {}
5709
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
5710

    
5711
    if not isinstance(self.op.instances, list):
5712
      raise errors.OpPrereqError("Invalid argument type 'instances'")
5713

    
5714
    if self.op.instances:
5715
      self.wanted_names = []
5716
      for name in self.op.instances:
5717
        full_name = self.cfg.ExpandInstanceName(name)
5718
        if full_name is None:
5719
          raise errors.OpPrereqError("Instance '%s' not known" % name)
5720
        self.wanted_names.append(full_name)
5721
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
5722
    else:
5723
      self.wanted_names = None
5724
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
5725

    
5726
    self.needed_locks[locking.LEVEL_NODE] = []
5727
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5728

    
5729
  def DeclareLocks(self, level):
5730
    if level == locking.LEVEL_NODE:
5731
      self._LockInstancesNodes()
5732

    
5733
  def CheckPrereq(self):
5734
    """Check prerequisites.
5735

5736
    This only checks the optional instance list against the existing names.
5737

5738
    """
5739
    if self.wanted_names is None:
5740
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
5741

    
5742
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
5743
                             in self.wanted_names]
5744
    return
5745

    
5746
  def _ComputeDiskStatus(self, instance, snode, dev):
5747
    """Compute block device status.
5748

5749
    """
5750
    static = self.op.static
5751
    if not static:
5752
      self.cfg.SetDiskID(dev, instance.primary_node)
5753
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
5754
      if dev_pstatus.offline:
5755
        dev_pstatus = None
5756
      else:
5757
        dev_pstatus.Raise("Can't compute disk status for %s" % instance.name)
5758
        dev_pstatus = dev_pstatus.payload
5759
    else:
5760
      dev_pstatus = None
5761

    
5762
    if dev.dev_type in constants.LDS_DRBD:
5763
      # we change the snode then (otherwise we use the one passed in)
5764
      if dev.logical_id[0] == instance.primary_node:
5765
        snode = dev.logical_id[1]
5766
      else:
5767
        snode = dev.logical_id[0]
5768

    
5769
    if snode and not static:
5770
      self.cfg.SetDiskID(dev, snode)
5771
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
5772
      if dev_sstatus.offline:
5773
        dev_sstatus = None
5774
      else:
5775
        dev_sstatus.Raise("Can't compute disk status for %s" % instance.name)
5776
        dev_sstatus = dev_sstatus.payload
5777
    else:
5778
      dev_sstatus = None
5779

    
5780
    if dev.children:
5781
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
5782
                      for child in dev.children]
5783
    else:
5784
      dev_children = []
5785

    
5786
    data = {
5787
      "iv_name": dev.iv_name,
5788
      "dev_type": dev.dev_type,
5789
      "logical_id": dev.logical_id,
5790
      "physical_id": dev.physical_id,
5791
      "pstatus": dev_pstatus,
5792
      "sstatus": dev_sstatus,
5793
      "children": dev_children,
5794
      "mode": dev.mode,
5795
      }
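    # A returned entry could look roughly like this (illustrative only):
    #   {"iv_name": "disk/0", "dev_type": constants.LD_LV, "logical_id": ...,
    #    "physical_id": ..., "pstatus": <find payload or None>,
    #    "sstatus": None, "children": [], "mode": "rw"}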
5796

    
5797
    return data
5798

    
5799
  def Exec(self, feedback_fn):
5800
    """Gather and return data"""
5801
    result = {}
5802

    
5803
    cluster = self.cfg.GetClusterInfo()
5804

    
5805
    for instance in self.wanted_instances:
5806
      if not self.op.static:
5807
        remote_info = self.rpc.call_instance_info(instance.primary_node,
5808
                                                  instance.name,
5809
                                                  instance.hypervisor)
5810
        remote_info.Raise("Error checking node %s" % instance.primary_node)
5811
        remote_info = remote_info.payload
5812
        if remote_info and "state" in remote_info:
5813
          remote_state = "up"
5814
        else:
5815
          remote_state = "down"
5816
      else:
5817
        remote_state = None
5818
      if instance.admin_up:
5819
        config_state = "up"
5820
      else:
5821
        config_state = "down"
5822

    
5823
      disks = [self._ComputeDiskStatus(instance, None, device)
5824
               for device in instance.disks]
5825

    
5826
      idict = {
5827
        "name": instance.name,
5828
        "config_state": config_state,
5829
        "run_state": remote_state,
5830
        "pnode": instance.primary_node,
5831
        "snodes": instance.secondary_nodes,
5832
        "os": instance.os,
5833
        # this happens to be the same format used for hooks
5834
        "nics": _NICListToTuple(self, instance.nics),
5835
        "disks": disks,
5836
        "hypervisor": instance.hypervisor,
5837
        "network_port": instance.network_port,
5838
        "hv_instance": instance.hvparams,
5839
        "hv_actual": cluster.FillHV(instance),
5840
        "be_instance": instance.beparams,
5841
        "be_actual": cluster.FillBE(instance),
5842
        }
5843

    
5844
      result[instance.name] = idict
5845

    
5846
    return result
5847

    
5848

    
5849
class LUSetInstanceParams(LogicalUnit):
5850
  """Modifies an instances's parameters.
5851

5852
  """
5853
  HPATH = "instance-modify"
5854
  HTYPE = constants.HTYPE_INSTANCE
5855
  _OP_REQP = ["instance_name"]
5856
  REQ_BGL = False
5857

    
5858
  def CheckArguments(self):
5859
    if not hasattr(self.op, 'nics'):
5860
      self.op.nics = []
5861
    if not hasattr(self.op, 'disks'):
5862
      self.op.disks = []
5863
    if not hasattr(self.op, 'beparams'):
5864
      self.op.beparams = {}
5865
    if not hasattr(self.op, 'hvparams'):
5866
      self.op.hvparams = {}
5867
    self.op.force = getattr(self.op, "force", False)
5868
    if not (self.op.nics or self.op.disks or
5869
            self.op.hvparams or self.op.beparams):
5870
      raise errors.OpPrereqError("No changes submitted")
5871

    
5872
    # Disk validation
5873
    disk_addremove = 0
5874
    for disk_op, disk_dict in self.op.disks:
5875
      if disk_op == constants.DDM_REMOVE:
5876
        disk_addremove += 1
5877
        continue
5878
      elif disk_op == constants.DDM_ADD:
5879
        disk_addremove += 1
5880
      else:
5881
        if not isinstance(disk_op, int):
5882
          raise errors.OpPrereqError("Invalid disk index")
5883
      if disk_op == constants.DDM_ADD:
5884
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
5885
        if mode not in constants.DISK_ACCESS_SET:
5886
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
5887
        size = disk_dict.get('size', None)
5888
        if size is None:
5889
          raise errors.OpPrereqError("Required disk parameter size missing")
5890
        try:
5891
          size = int(size)
5892
        except ValueError, err:
5893
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
5894
                                     str(err))
5895
        disk_dict['size'] = size
5896
      else:
5897
        # modification of disk
5898
        if 'size' in disk_dict:
5899
          raise errors.OpPrereqError("Disk size change not possible, use"
5900
                                     " grow-disk")
5901

    
5902
    if disk_addremove > 1:
5903
      raise errors.OpPrereqError("Only one disk add or remove operation"
5904
                                 " supported at a time")
5905

    
5906
    # NIC validation
5907
    nic_addremove = 0
5908
    for nic_op, nic_dict in self.op.nics:
5909
      if nic_op == constants.DDM_REMOVE:
5910
        nic_addremove += 1
5911
        continue
5912
      elif nic_op == constants.DDM_ADD:
5913
        nic_addremove += 1
5914
      else:
5915
        if not isinstance(nic_op, int):
5916
          raise errors.OpPrereqError("Invalid nic index")
5917

    
5918
      # nic_dict should be a dict
5919
      nic_ip = nic_dict.get('ip', None)
5920
      if nic_ip is not None:
5921
        if nic_ip.lower() == constants.VALUE_NONE:
5922
          nic_dict['ip'] = None
5923
        else:
5924
          if not utils.IsValidIP(nic_ip):
5925
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
5926

    
5927
      nic_bridge = nic_dict.get('bridge', None)
5928
      nic_link = nic_dict.get('link', None)
5929
      if nic_bridge and nic_link:
5930
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link' at the same time")
5931
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
5932
        nic_dict['bridge'] = None
5933
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
5934
        nic_dict['link'] = None
5935

    
5936
      if nic_op == constants.DDM_ADD:
5937
        nic_mac = nic_dict.get('mac', None)
5938
        if nic_mac is None:
5939
          nic_dict['mac'] = constants.VALUE_AUTO
5940

    
5941
      if 'mac' in nic_dict:
5942
        nic_mac = nic_dict['mac']
5943
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5944
          if not utils.IsValidMac(nic_mac):
5945
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
5946
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
5947
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
5948
                                     " modifying an existing nic")
5949

    
5950
    if nic_addremove > 1:
5951
      raise errors.OpPrereqError("Only one NIC add or remove operation"
5952
                                 " supported at a time")
5953

    
5954
  def ExpandNames(self):
5955
    self._ExpandAndLockInstance()
5956
    self.needed_locks[locking.LEVEL_NODE] = []
5957
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5958

    
5959
  def DeclareLocks(self, level):
5960
    if level == locking.LEVEL_NODE:
5961
      self._LockInstancesNodes()
5962

    
5963
  def BuildHooksEnv(self):
5964
    """Build hooks env.
5965

5966
    This runs on the master, primary and secondaries.
5967

5968
    """
5969
    args = dict()
5970
    if constants.BE_MEMORY in self.be_new:
5971
      args['memory'] = self.be_new[constants.BE_MEMORY]
5972
    if constants.BE_VCPUS in self.be_new:
5973
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
5974
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
5975
    # information at all.
5976
    if self.op.nics:
5977
      args['nics'] = []
5978
      nic_override = dict(self.op.nics)
5979
      c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
5980
      for idx, nic in enumerate(self.instance.nics):
5981
        if idx in nic_override:
5982
          this_nic_override = nic_override[idx]
5983
        else:
5984
          this_nic_override = {}
5985
        if 'ip' in this_nic_override:
5986
          ip = this_nic_override['ip']
5987
        else:
5988
          ip = nic.ip
5989
        if 'mac' in this_nic_override:
5990
          mac = this_nic_override['mac']
5991
        else:
5992
          mac = nic.mac
5993
        if idx in self.nic_pnew:
5994
          nicparams = self.nic_pnew[idx]
5995
        else:
5996
          nicparams = objects.FillDict(c_nicparams, nic.nicparams)
5997
        mode = nicparams[constants.NIC_MODE]
5998
        link = nicparams[constants.NIC_LINK]
5999
        args['nics'].append((ip, mac, mode, link))
6000
      if constants.DDM_ADD in nic_override:
6001
        ip = nic_override[constants.DDM_ADD].get('ip', None)
6002
        mac = nic_override[constants.DDM_ADD]['mac']
6003
        nicparams = self.nic_pnew[constants.DDM_ADD]
6004
        mode = nicparams[constants.NIC_MODE]
6005
        link = nicparams[constants.NIC_LINK]
6006
        args['nics'].append((ip, mac, mode, link))
6007
      elif constants.DDM_REMOVE in nic_override:
6008
        del args['nics'][-1]
6009

    
6010
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
6011
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6012
    return env, nl, nl
6013

    
6014
  def _GetUpdatedParams(self, old_params, update_dict,
6015
                        default_values, parameter_types):
6016
    """Return the new params dict for the given params.
6017

6018
    @type old_params: dict
6019
    @param old_params: old parameters
6020
    @type update_dict: dict
6021
    @param update_dict: dict containing new parameter values,
6022
                       or constants.VALUE_DEFAULT to reset the
6023
                       parameter to its default value
6024
    @type default_values: dict
6025
    @param default_values: default values for the filled parameters
6026
    @type parameter_types: dict
6027
    @param parameter_types: dict mapping target dict keys to types
6028
                            in constants.ENFORCEABLE_TYPES
6029
    @rtype: (dict, dict)
6030
    @return: (new_parameters, filled_parameters)
6031

6032
    """
6033
    params_copy = copy.deepcopy(old_params)
6034
    for key, val in update_dict.iteritems():
6035
      if val == constants.VALUE_DEFAULT:
6036
        try:
6037
          del params_copy[key]
6038
        except KeyError:
6039
          pass
6040
      else:
6041
        params_copy[key] = val
6042
    utils.ForceDictType(params_copy, parameter_types)
6043
    params_filled = objects.FillDict(default_values, params_copy)
6044
    return (params_copy, params_filled)
6045

    
6046
  def CheckPrereq(self):
6047
    """Check prerequisites.
6048

6049
    This only checks the instance list against the existing names.
6050

6051
    """
6052
    force = self.force = self.op.force
6053

    
6054
    # checking the new params on the primary/secondary nodes
6055

    
6056
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6057
    cluster = self.cluster = self.cfg.GetClusterInfo()
6058
    assert self.instance is not None, \
6059
      "Cannot retrieve locked instance %s" % self.op.instance_name
6060
    pnode = instance.primary_node
6061
    nodelist = list(instance.all_nodes)
6062

    
6063
    # hvparams processing
6064
    if self.op.hvparams:
6065
      i_hvdict, hv_new = self._GetUpdatedParams(
6066
                             instance.hvparams, self.op.hvparams,
6067
                             cluster.hvparams[instance.hypervisor],
6068
                             constants.HVS_PARAMETER_TYPES)
6069
      # local check
6070
      hypervisor.GetHypervisor(
6071
        instance.hypervisor).CheckParameterSyntax(hv_new)
6072
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
6073
      self.hv_new = hv_new # the new actual values
6074
      self.hv_inst = i_hvdict # the new dict (without defaults)
6075
    else:
6076
      self.hv_new = self.hv_inst = {}
6077

    
6078
    # beparams processing
6079
    if self.op.beparams:
6080
      i_bedict, be_new = self._GetUpdatedParams(
6081
                             instance.beparams, self.op.beparams,
6082
                             cluster.beparams[constants.PP_DEFAULT],
6083
                             constants.BES_PARAMETER_TYPES)
6084
      self.be_new = be_new # the new actual values
6085
      self.be_inst = i_bedict # the new dict (without defaults)
6086
    else:
6087
      self.be_new = self.be_inst = {}
6088

    
6089
    self.warn = []
6090

    
6091
    if constants.BE_MEMORY in self.op.beparams and not self.force:
6092
      mem_check_list = [pnode]
6093
      if be_new[constants.BE_AUTO_BALANCE]:
6094
        # either we changed auto_balance to yes or it was from before
6095
        mem_check_list.extend(instance.secondary_nodes)
6096
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
6097
                                                  instance.hypervisor)
6098
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
6099
                                         instance.hypervisor)
6100
      pninfo = nodeinfo[pnode]
6101
      msg = pninfo.fail_msg
6102
      if msg:
6103
        # Assume the primary node is unreachable and go ahead
6104
        self.warn.append("Can't get info from primary node %s: %s" %
6105
                         (pnode,  msg))
6106
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
6107
        self.warn.append("Node data from primary node %s doesn't contain"
6108
                         " free memory information" % pnode)
6109
      elif instance_info.fail_msg:
6110
        self.warn.append("Can't get instance runtime information: %s" %
6111
                        instance_info.fail_msg)
6112
      else:
6113
        if instance_info.payload:
6114
          current_mem = int(instance_info.payload['memory'])
6115
        else:
6116
          # Assume instance not running
6117
          # (there is a slight race condition here, but it's not very probable,
6118
          # and we have no other way to check)
6119
          current_mem = 0
6120
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
6121
                    pninfo.payload['memory_free'])
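        # e.g. raising BE_MEMORY to 4096 while the instance currently uses
        # 1024 and the node reports 2048 MiB free gives miss_mem == 1024,
        # which triggers the rejection below (illustrative numbers only)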
6122
        if miss_mem > 0:
6123
          raise errors.OpPrereqError("This change will prevent the instance"
6124
                                     " from starting, due to %d MB of memory"
6125
                                     " missing on its primary node" % miss_mem)
6126

    
6127
      if be_new[constants.BE_AUTO_BALANCE]:
6128
        for node, nres in nodeinfo.items():
6129
          if node not in instance.secondary_nodes:
6130
            continue
6131
          msg = nres.fail_msg
6132
          if msg:
6133
            self.warn.append("Can't get info from secondary node %s: %s" %
6134
                             (node, msg))
6135
          elif not isinstance(nres.payload.get('memory_free', None), int):
6136
            self.warn.append("Secondary node %s didn't return free"
6137
                             " memory information" % node)
6138
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
6139
            self.warn.append("Not enough memory to failover instance to"
6140
                             " secondary node %s" % node)
6141

    
6142
    # NIC processing
6143
    self.nic_pnew = {}
6144
    self.nic_pinst = {}
6145
    for nic_op, nic_dict in self.op.nics:
6146
      if nic_op == constants.DDM_REMOVE:
6147
        if not instance.nics:
6148
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
6149
        continue
6150
      if nic_op != constants.DDM_ADD:
6151
        # an existing nic
6152
        if nic_op < 0 or nic_op >= len(instance.nics):
6153
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
6154
                                     " are 0 to %d" %
6155
                                     (nic_op, len(instance.nics)))
6156
        old_nic_params = instance.nics[nic_op].nicparams
6157
        old_nic_ip = instance.nics[nic_op].ip
6158
      else:
6159
        old_nic_params = {}
6160
        old_nic_ip = None
6161

    
6162
      update_params_dict = dict([(key, nic_dict[key])
6163
                                 for key in constants.NICS_PARAMETERS
6164
                                 if key in nic_dict])
6165

    
6166
      if 'bridge' in nic_dict:
6167
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
6168

    
6169
      new_nic_params, new_filled_nic_params = \
6170
          self._GetUpdatedParams(old_nic_params, update_params_dict,
6171
                                 cluster.nicparams[constants.PP_DEFAULT],
6172
                                 constants.NICS_PARAMETER_TYPES)
6173
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
6174
      self.nic_pinst[nic_op] = new_nic_params
6175
      self.nic_pnew[nic_op] = new_filled_nic_params
6176
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
6177

    
6178
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
6179
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
6180
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
6181
        if msg:
6182
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
6183
          if self.force:
6184
            self.warn.append(msg)
6185
          else:
6186
            raise errors.OpPrereqError(msg)
6187
      if new_nic_mode == constants.NIC_MODE_ROUTED:
6188
        if 'ip' in nic_dict:
6189
          nic_ip = nic_dict['ip']
6190
        else:
6191
          nic_ip = old_nic_ip
6192
        if nic_ip is None:
6193
          raise errors.OpPrereqError('Cannot set the nic ip to None'
6194
                                     ' on a routed nic')
6195
      if 'mac' in nic_dict:
6196
        nic_mac = nic_dict['mac']
6197
        if nic_mac is None:
6198
          raise errors.OpPrereqError('Cannot set the nic mac to None')
6199
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6200
          # otherwise generate the mac
6201
          nic_dict['mac'] = self.cfg.GenerateMAC()
6202
        else:
6203
          # or validate/reserve the current one
6204
          if self.cfg.IsMacInUse(nic_mac):
6205
            raise errors.OpPrereqError("MAC address %s already in use"
6206
                                       " in cluster" % nic_mac)
6207

    
6208
    # DISK processing
6209
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
6210
      raise errors.OpPrereqError("Disk operations not supported for"
6211
                                 " diskless instances")
6212
    for disk_op, disk_dict in self.op.disks:
6213
      if disk_op == constants.DDM_REMOVE:
6214
        if len(instance.disks) == 1:
6215
          raise errors.OpPrereqError("Cannot remove the last disk of"
6216
                                     " an instance")
6217
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
6218
        ins_l = ins_l[pnode]
6219
        msg = ins_l.fail_msg
6220
        if msg:
6221
          raise errors.OpPrereqError("Can't contact node %s: %s" %
6222
                                     (pnode, msg))
6223
        if instance.name in ins_l.payload:
6224
          raise errors.OpPrereqError("Instance is running, can't remove"
6225
                                     " disks.")
6226

    
6227
      if (disk_op == constants.DDM_ADD and
6228
          len(instance.disks) >= constants.MAX_DISKS):
6229
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
6230
                                   " add more" % constants.MAX_DISKS)
6231
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
6232
        # an existing disk
6233
        if disk_op < 0 or disk_op >= len(instance.disks):
6234
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
6235
                                     " are 0 to %d" %
6236
                                     (disk_op, len(instance.disks)))
6237

    
6238
    return
6239

    
6240
  def Exec(self, feedback_fn):
6241
    """Modifies an instance.
6242

6243
    All parameters take effect only at the next restart of the instance.
6244

6245
    """
6246
    # Process here the warnings from CheckPrereq, as we don't have a
6247
    # feedback_fn there.
6248
    for warn in self.warn:
6249
      feedback_fn("WARNING: %s" % warn)
6250

    
6251
    result = []
6252
    instance = self.instance
6253
    cluster = self.cluster
6254
    # disk changes
6255
    for disk_op, disk_dict in self.op.disks:
6256
      if disk_op == constants.DDM_REMOVE:
6257
        # remove the last disk
6258
        device = instance.disks.pop()
6259
        device_idx = len(instance.disks)
6260
        for node, disk in device.ComputeNodeTree(instance.primary_node):
6261
          self.cfg.SetDiskID(disk, node)
6262
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
6263
          if msg:
6264
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
6265
                            " continuing anyway", device_idx, node, msg)
6266
        result.append(("disk/%d" % device_idx, "remove"))
6267
      elif disk_op == constants.DDM_ADD:
6268
        # add a new disk
6269
        if instance.disk_template == constants.DT_FILE:
6270
          file_driver, file_path = instance.disks[0].logical_id
6271
          file_path = os.path.dirname(file_path)
6272
        else:
6273
          file_driver = file_path = None
6274
        disk_idx_base = len(instance.disks)
6275
        new_disk = _GenerateDiskTemplate(self,
6276
                                         instance.disk_template,
6277
                                         instance.name, instance.primary_node,
6278
                                         instance.secondary_nodes,
6279
                                         [disk_dict],
6280
                                         file_path,
6281
                                         file_driver,
6282
                                         disk_idx_base)[0]
6283
        instance.disks.append(new_disk)
6284
        info = _GetInstanceInfoText(instance)
6285

    
6286
        logging.info("Creating volume %s for instance %s",
6287
                     new_disk.iv_name, instance.name)
6288
        # Note: this needs to be kept in sync with _CreateDisks
6289
        #HARDCODE
6290
        for node in instance.all_nodes:
6291
          f_create = node == instance.primary_node
6292
          try:
6293
            _CreateBlockDev(self, node, instance, new_disk,
6294
                            f_create, info, f_create)
6295
          except errors.OpExecError, err:
6296
            self.LogWarning("Failed to create volume %s (%s) on"
6297
                            " node %s: %s",
6298
                            new_disk.iv_name, new_disk, node, err)
6299
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
6300
                       (new_disk.size, new_disk.mode)))
6301
      else:
6302
        # change a given disk
6303
        instance.disks[disk_op].mode = disk_dict['mode']
6304
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
6305
    # NIC changes
6306
    for nic_op, nic_dict in self.op.nics:
6307
      if nic_op == constants.DDM_REMOVE:
6308
        # remove the last nic
6309
        del instance.nics[-1]
6310
        result.append(("nic.%d" % len(instance.nics), "remove"))
6311
      elif nic_op == constants.DDM_ADD:
6312
        # mac and bridge should be set by now
6313
        mac = nic_dict['mac']
6314
        ip = nic_dict.get('ip', None)
6315
        nicparams = self.nic_pinst[constants.DDM_ADD]
6316
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
6317
        instance.nics.append(new_nic)
6318
        result.append(("nic.%d" % (len(instance.nics) - 1),
6319
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
6320
                       (new_nic.mac, new_nic.ip,
6321
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
6322
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
6323
                       )))
6324
      else:
6325
        for key in 'mac', 'ip':
6326
          if key in nic_dict:
6327
            setattr(instance.nics[nic_op], key, nic_dict[key])
6328
        if nic_op in self.nic_pnew:
6329
          instance.nics[nic_op].nicparams = self.nic_pnew[nic_op]
6330
        for key, val in nic_dict.iteritems():
6331
          result.append(("nic.%s/%d" % (key, nic_op), val))
6332

    
6333
    # hvparams changes
6334
    if self.op.hvparams:
6335
      instance.hvparams = self.hv_inst
6336
      for key, val in self.op.hvparams.iteritems():
6337
        result.append(("hv/%s" % key, val))
6338

    
6339
    # beparams changes
6340
    if self.op.beparams:
6341
      instance.beparams = self.be_inst
6342
      for key, val in self.op.beparams.iteritems():
6343
        result.append(("be/%s" % key, val))
6344

    
6345
    self.cfg.Update(instance)
6346

    
6347
    return result
6348

    
6349

    
6350
class LUQueryExports(NoHooksLU):
6351
  """Query the exports list
6352

6353
  """
6354
  _OP_REQP = ['nodes']
6355
  REQ_BGL = False
6356

    
6357
  def ExpandNames(self):
6358
    self.needed_locks = {}
6359
    self.share_locks[locking.LEVEL_NODE] = 1
6360
    if not self.op.nodes:
6361
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6362
    else:
6363
      self.needed_locks[locking.LEVEL_NODE] = \
6364
        _GetWantedNodes(self, self.op.nodes)
6365

    
6366
  def CheckPrereq(self):
6367
    """Check prerequisites.
6368

6369
    """
6370
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
6371

    
6372
  def Exec(self, feedback_fn):
6373
    """Compute the list of all the exported system images.
6374

6375
    @rtype: dict
6376
    @return: a dictionary with the structure node->(export-list)
6377
        where export-list is a list of the instances exported on
6378
        that node.
6379

6380
    """
6381
    rpcresult = self.rpc.call_export_list(self.nodes)
6382
    result = {}
6383
    for node in rpcresult:
6384
      if rpcresult[node].fail_msg:
6385
        result[node] = False
6386
      else:
6387
        result[node] = rpcresult[node].payload
6388

    
6389
    return result
6390

    
6391

    
6392
class LUExportInstance(LogicalUnit):
6393
  """Export an instance to an image in the cluster.
6394

6395
  """
6396
  HPATH = "instance-export"
6397
  HTYPE = constants.HTYPE_INSTANCE
6398
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
6399
  REQ_BGL = False
6400

    
6401
  def ExpandNames(self):
6402
    self._ExpandAndLockInstance()
6403
    # FIXME: lock only instance primary and destination node
6404
    #
6405
    # Sad but true, for now we have do lock all nodes, as we don't know where
6406
    # the previous export might be, and and in this LU we search for it and
6407
    # remove it from its current node. In the future we could fix this by:
6408
    #  - making a tasklet to search (share-lock all), then create the new one,
6409
    #    then one to remove, after
6410
    #  - removing the removal operation altogether
6411
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6412

    
6413
  def DeclareLocks(self, level):
6414
    """Last minute lock declaration."""
6415
    # All nodes are locked anyway, so nothing to do here.
6416

    
6417
  def BuildHooksEnv(self):
6418
    """Build hooks env.
6419

6420
    This will run on the master, primary node and target node.
6421

6422
    """
6423
    env = {
6424
      "EXPORT_NODE": self.op.target_node,
6425
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
6426
      }
6427
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6428
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
6429
          self.op.target_node]
6430
    return env, nl, nl
6431

    
6432
  def CheckPrereq(self):
6433
    """Check prerequisites.
6434

6435
    This checks that the instance and node names are valid.
6436

6437
    """
6438
    instance_name = self.op.instance_name
6439
    self.instance = self.cfg.GetInstanceInfo(instance_name)
6440
    assert self.instance is not None, \
6441
          "Cannot retrieve locked instance %s" % self.op.instance_name
6442
    _CheckNodeOnline(self, self.instance.primary_node)
6443

    
6444
    self.dst_node = self.cfg.GetNodeInfo(
6445
      self.cfg.ExpandNodeName(self.op.target_node))
6446

    
6447
    if self.dst_node is None:
6448
      # This is wrong node name, not a non-locked node
6449
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
6450
    _CheckNodeOnline(self, self.dst_node.name)
6451
    _CheckNodeNotDrained(self, self.dst_node.name)
6452

    
6453
    # instance disk type verification
6454
    for disk in self.instance.disks:
6455
      if disk.dev_type == constants.LD_FILE:
6456
        raise errors.OpPrereqError("Export not supported for instances with"
6457
                                   " file-based disks")
6458

    
6459
  def Exec(self, feedback_fn):
6460
    """Export an instance to an image in the cluster.
6461

6462
    """
6463
    instance = self.instance
6464
    dst_node = self.dst_node
6465
    src_node = instance.primary_node
6466
    if self.op.shutdown:
6467
      # shutdown the instance, but not the disks
6468
      result = self.rpc.call_instance_shutdown(src_node, instance)
6469
      result.Raise("Could not shutdown instance %s on"
6470
                   " node %s" % (instance.name, src_node))
6471

    
6472
    vgname = self.cfg.GetVGName()
6473

    
6474
    snap_disks = []
6475

    
6476
    # set the disks ID correctly since call_instance_start needs the
6477
    # correct drbd minor to create the symlinks
6478
    for disk in instance.disks:
6479
      self.cfg.SetDiskID(disk, src_node)
6480

    
6481
    try:
6482
      for idx, disk in enumerate(instance.disks):
6483
        # result.payload will be a snapshot of an lvm leaf of the one we passed
6484
        result = self.rpc.call_blockdev_snapshot(src_node, disk)
6485
        msg = result.fail_msg
6486
        if msg:
6487
          self.LogWarning("Could not snapshot disk/%s on node %s: %s",
6488
                          idx, src_node, msg)
6489
          snap_disks.append(False)
6490
        else:
6491
          disk_id = (vgname, result.payload)
6492
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
6493
                                 logical_id=disk_id, physical_id=disk_id,
6494
                                 iv_name=disk.iv_name)
6495
          snap_disks.append(new_dev)
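          # e.g. disk_id could end up as ("xenvg", "some-snapshot-lv");
          # both elements depend on the cluster VG name and on whatever
          # call_blockdev_snapshot returned, so these are illustrative only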
6496

    
6497
    finally:
6498
      if self.op.shutdown and instance.admin_up:
6499
        result = self.rpc.call_instance_start(src_node, instance, None, None)
6500
        msg = result.fail_msg
6501
        if msg:
6502
          _ShutdownInstanceDisks(self, instance)
6503
          raise errors.OpExecError("Could not start instance: %s" % msg)
6504

    
6505
    # TODO: check for size
6506

    
6507
    cluster_name = self.cfg.GetClusterName()
6508
    for idx, dev in enumerate(snap_disks):
6509
      if dev:
6510
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
6511
                                               instance, cluster_name, idx)
6512
        msg = result.fail_msg
6513
        if msg:
6514
          self.LogWarning("Could not export disk/%s from node %s to"
6515
                          " node %s: %s", idx, src_node, dst_node.name, msg)
6516
        msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
6517
        if msg:
6518
          self.LogWarning("Could not remove snapshot for disk/%d from node"
6519
                          " %s: %s", idx, src_node, msg)
6520

    
6521
    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
6522
    msg = result.fail_msg
6523
    if msg:
6524
      self.LogWarning("Could not finalize export for instance %s"
6525
                      " on node %s: %s", instance.name, dst_node.name, msg)
6526

    
6527
    nodelist = self.cfg.GetNodeList()
6528
    nodelist.remove(dst_node.name)
6529

    
6530
    # on one-node clusters nodelist will be empty after the removal
6531
    # if we proceed the backup would be removed because OpQueryExports
6532
    # substitutes an empty list with the full cluster node list.
6533
    iname = instance.name
6534
    if nodelist:
6535
      exportlist = self.rpc.call_export_list(nodelist)
6536
      for node in exportlist:
6537
        if exportlist[node].fail_msg:
6538
          continue
6539
        if iname in exportlist[node].payload:
6540
          msg = self.rpc.call_export_remove(node, iname).fail_msg
6541
          if msg:
6542
            self.LogWarning("Could not remove older export for instance %s"
6543
                            " on node %s: %s", iname, node, msg)
6544

    
6545

    
6546
class LURemoveExport(NoHooksLU):
6547
  """Remove exports related to the named instance.
6548

6549
  """
6550
  _OP_REQP = ["instance_name"]
6551
  REQ_BGL = False
6552

    
6553
  def ExpandNames(self):
6554
    self.needed_locks = {}
6555
    # We need all nodes to be locked in order for RemoveExport to work, but we
6556
    # don't need to lock the instance itself, as nothing will happen to it (and
6557
    # we can remove exports also for a removed instance)
6558
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6559

    
6560
  def CheckPrereq(self):
6561
    """Check prerequisites.
6562
    """
6563
    pass
6564

    
6565
  def Exec(self, feedback_fn):
6566
    """Remove any export.
6567

6568
    """
6569
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
6570
    # If the instance was not found we'll try with the name that was passed in.
6571
    # This will only work if it was an FQDN, though.
6572
    fqdn_warn = False
6573
    if not instance_name:
6574
      fqdn_warn = True
6575
      instance_name = self.op.instance_name
6576

    
6577
    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
6578
    exportlist = self.rpc.call_export_list(locked_nodes)
6579
    found = False
6580
    for node in exportlist:
6581
      msg = exportlist[node].fail_msg
6582
      if msg:
6583
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
6584
        continue
6585
      if instance_name in exportlist[node].payload:
6586
        found = True
6587
        result = self.rpc.call_export_remove(node, instance_name)
6588
        msg = result.fail_msg
6589
        if msg:
6590
          logging.error("Could not remove export for instance %s"
6591
                        " on node %s: %s", instance_name, node, msg)
6592

    
6593
    if fqdn_warn and not found:
6594
      feedback_fn("Export not found. If trying to remove an export belonging"
6595
                  " to a deleted instance please use its Fully Qualified"
6596
                  " Domain Name.")
6597

    
6598

    
6599
class TagsLU(NoHooksLU):
6600
  """Generic tags LU.
6601

6602
  This is an abstract class which is the parent of all the other tags LUs.
6603

6604
  """
6605

    
6606
  def ExpandNames(self):
6607
    self.needed_locks = {}
6608
    if self.op.kind == constants.TAG_NODE:
6609
      name = self.cfg.ExpandNodeName(self.op.name)
6610
      if name is None:
6611
        raise errors.OpPrereqError("Invalid node name (%s)" %
6612
                                   (self.op.name,))
6613
      self.op.name = name
6614
      self.needed_locks[locking.LEVEL_NODE] = name
6615
    elif self.op.kind == constants.TAG_INSTANCE:
6616
      name = self.cfg.ExpandInstanceName(self.op.name)
6617
      if name is None:
6618
        raise errors.OpPrereqError("Invalid instance name (%s)" %
6619
                                   (self.op.name,))
6620
      self.op.name = name
6621
      self.needed_locks[locking.LEVEL_INSTANCE] = name
6622

    
6623
  def CheckPrereq(self):
6624
    """Check prerequisites.
6625

6626
    """
6627
    if self.op.kind == constants.TAG_CLUSTER:
6628
      self.target = self.cfg.GetClusterInfo()
6629
    elif self.op.kind == constants.TAG_NODE:
6630
      self.target = self.cfg.GetNodeInfo(self.op.name)
6631
    elif self.op.kind == constants.TAG_INSTANCE:
6632
      self.target = self.cfg.GetInstanceInfo(self.op.name)
6633
    else:
6634
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
6635
                                 str(self.op.kind))
6636

    
6637

    
6638
class LUGetTags(TagsLU):
6639
  """Returns the tags of a given object.
6640

6641
  """
6642
  _OP_REQP = ["kind", "name"]
6643
  REQ_BGL = False
6644

    
6645
  def Exec(self, feedback_fn):
6646
    """Returns the tag list.
6647

6648
    """
6649
    return list(self.target.GetTags())
6650

    
6651

    
6652
class LUSearchTags(NoHooksLU):
6653
  """Searches the tags for a given pattern.
6654

6655
  """
6656
  _OP_REQP = ["pattern"]
6657
  REQ_BGL = False
6658

    
6659
  def ExpandNames(self):
6660
    self.needed_locks = {}
6661

    
6662
  def CheckPrereq(self):
6663
    """Check prerequisites.
6664

6665
    This checks the pattern passed for validity by compiling it.
6666

6667
    """
6668
    try:
6669
      self.re = re.compile(self.op.pattern)
6670
    except re.error, err:
6671
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
6672
                                 (self.op.pattern, err))
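    # e.g. a pattern of "^web" would later match tags such as "web01" or
    # "webfarm" on cluster, node and instance objects (example tags only)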
6673

    
6674
  def Exec(self, feedback_fn):
6675
    """Returns the tag list.
6676

6677
    """
6678
    cfg = self.cfg
6679
    tgts = [("/cluster", cfg.GetClusterInfo())]
6680
    ilist = cfg.GetAllInstancesInfo().values()
6681
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
6682
    nlist = cfg.GetAllNodesInfo().values()
6683
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
6684
    results = []
6685
    for path, target in tgts:
6686
      for tag in target.GetTags():
6687
        if self.re.search(tag):
6688
          results.append((path, tag))
6689
    return results
6690

    
6691

    
6692
class LUAddTags(TagsLU):
6693
  """Sets a tag on a given object.
6694

6695
  """
6696
  _OP_REQP = ["kind", "name", "tags"]
6697
  REQ_BGL = False
6698

    
6699
  def CheckPrereq(self):
6700
    """Check prerequisites.
6701

6702
    This checks the type and length of the tag name and value.
6703

6704
    """
6705
    TagsLU.CheckPrereq(self)
6706
    for tag in self.op.tags:
6707
      objects.TaggableObject.ValidateTag(tag)
6708

    
6709
  def Exec(self, feedback_fn):
6710
    """Sets the tag.
6711

6712
    """
6713
    try:
6714
      for tag in self.op.tags:
6715
        self.target.AddTag(tag)
6716
    except errors.TagError, err:
6717
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
6718
    try:
6719
      self.cfg.Update(self.target)
6720
    except errors.ConfigurationError:
6721
      raise errors.OpRetryError("There has been a modification to the"
6722
                                " config file and the operation has been"
6723
                                " aborted. Please retry.")
6724

    
6725

    
6726
class LUDelTags(TagsLU):
6727
  """Delete a list of tags from a given object.
6728

6729
  """
6730
  _OP_REQP = ["kind", "name", "tags"]
6731
  REQ_BGL = False
6732

    
6733
  def CheckPrereq(self):
6734
    """Check prerequisites.
6735

6736
    This checks that we have the given tag.
6737

6738
    """
6739
    TagsLU.CheckPrereq(self)
6740
    for tag in self.op.tags:
6741
      objects.TaggableObject.ValidateTag(tag)
6742
    del_tags = frozenset(self.op.tags)
6743
    cur_tags = self.target.GetTags()
6744
    if not del_tags <= cur_tags:
6745
      diff_tags = del_tags - cur_tags
6746
      diff_names = ["'%s'" % tag for tag in diff_tags]
6747
      diff_names.sort()
6748
      raise errors.OpPrereqError("Tag(s) %s not found" %
6749
                                 (",".join(diff_names)))
6750

    
6751
  def Exec(self, feedback_fn):
6752
    """Remove the tag from the object.
6753

6754
    """
6755
    for tag in self.op.tags:
6756
      self.target.RemoveTag(tag)
6757
    try:
6758
      self.cfg.Update(self.target)
6759
    except errors.ConfigurationError:
6760
      raise errors.OpRetryError("There has been a modification to the"
6761
                                " config file and the operation has been"
6762
                                " aborted. Please retry.")
6763

    
6764

    
6765
class LUTestDelay(NoHooksLU):
6766
  """Sleep for a specified amount of time.
6767

6768
  This LU sleeps on the master and/or nodes for a specified amount of
6769
  time.
6770

6771
  """
6772
  _OP_REQP = ["duration", "on_master", "on_nodes"]
6773
  REQ_BGL = False
6774

    
6775
  def ExpandNames(self):
6776
    """Expand names and set required locks.
6777

6778
    This expands the node list, if any.
6779

6780
    """
6781
    self.needed_locks = {}
6782
    if self.op.on_nodes:
6783
      # _GetWantedNodes can be used here, but is not always appropriate to use
6784
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
6785
      # more information.
6786
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
6787
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
6788

    
6789
  def CheckPrereq(self):
6790
    """Check prerequisites.
6791

6792
    """
6793

    
6794
  def Exec(self, feedback_fn):
6795
    """Do the actual sleep.
6796

6797
    """
6798
    if self.op.on_master:
6799
      if not utils.TestDelay(self.op.duration):
6800
        raise errors.OpExecError("Error during master delay test")
6801
    if self.op.on_nodes:
6802
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
6803
      for node, node_result in result.items():
6804
        node_result.Raise("Failure during rpc call to node %s" % node)
6805

    
6806

    
6807
class IAllocator(object):
6808
  """IAllocator framework.
6809

6810
  An IAllocator instance has three sets of attributes:
6811
    - cfg that is needed to query the cluster
6812
    - input data (all members of the _KEYS class attribute are required)
6813
    - four buffer attributes (in|out_data|text), that represent the
6814
      input (to the external script) in text and data structure format,
6815
      and the output from it, again in two formats
6816
    - the result variables from the script (success, info, nodes) for
6817
      easy usage
6818

6819
  """
6820
  _ALLO_KEYS = [
6821
    "mem_size", "disks", "disk_template",
6822
    "os", "tags", "nics", "vcpus", "hypervisor",
6823
    ]
6824
  _RELO_KEYS = [
6825
    "relocate_from",
6826
    ]
6827

    
6828
  def __init__(self, lu, mode, name, **kwargs):
6829
    self.lu = lu
6830
    # init buffer variables
6831
    self.in_text = self.out_text = self.in_data = self.out_data = None
6832
    # init all input fields so that pylint is happy
6833
    self.mode = mode
6834
    self.name = name
6835
    self.mem_size = self.disks = self.disk_template = None
6836
    self.os = self.tags = self.nics = self.vcpus = None
6837
    self.hypervisor = None
6838
    self.relocate_from = None
6839
    # computed fields
6840
    self.required_nodes = None
6841
    # init result fields
6842
    self.success = self.info = self.nodes = None
6843
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
6844
      keyset = self._ALLO_KEYS
6845
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
6846
      keyset = self._RELO_KEYS
6847
    else:
6848
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
6849
                                   " IAllocator" % self.mode)
6850
    for key in kwargs:
6851
      if key not in keyset:
6852
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
6853
                                     " IAllocator" % key)
6854
      setattr(self, key, kwargs[key])
6855
    for key in keyset:
6856
      if key not in kwargs:
6857
        raise errors.ProgrammerError("Missing input parameter '%s' to"
6858
                                     " IAllocator" % key)
6859
    self._BuildInputData()
6860

    
6861
  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.lu.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor

    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
                                           hypervisor_name)
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
                       cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        }

      if not ninfo.offline:
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = objects.FillDict(
            cluster_info.nicparams[constants.PP_DEFAULT],
            nic.nicparams)
        nic_dict = {"mac": nic.mac,
                    "ip": nic.ip,
                    "mode": filled_params[constants.NIC_MODE],
                    "link": filled_params[constants.NIC_LINK],
                   }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

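  # The in_data dictionary built above has roughly the following layout
  # (names and values are illustrative; most per-node and per-instance
  # fields are elided):
  #   {"version": ...,
  #    "cluster_name": "cluster.example.com",
  #    "cluster_tags": [],
  #    "enabled_hypervisors": ["xen-pvm"],
  #    "nodes": {"node1.example.com": {"total_memory": ..., "free_memory": ...,
  #                                    "total_disk": ..., "free_disk": ...,
  #                                    "total_cpus": ..., ...}, ...},
  #    "instances": {"instance1.example.com": {"memory": ..., "vcpus": ...,
  #                                            "disks": [...], "nics": [...],
  #                                            ...}, ...}}
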
  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

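  # An "allocate" request produced above looks roughly like this (all values
  # are purely illustrative):
  #   {"type": "allocate", "name": "instance1.example.com",
  #    "disk_template": "drbd", "tags": [], "os": "debian-etch",
  #    "vcpus": 1, "memory": 512,
  #    "disks": [{"size": 10240, "mode": "w"}], "disk_space_total": 10368,
  #    "nics": [{"mac": "auto", "ip": None, "bridge": "xen-br0"}],
  #    "required_nodes": 2}
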
  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node")

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

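  # A "relocate" request, by contrast, only names the instance, its disk
  # space needs and the node it is being moved away from (values are
  # illustrative):
  #   {"type": "relocate", "name": "instance1.example.com",
  #    "disk_space_total": 10368, "required_nodes": 1,
  #    "relocate_from": ["node3.example.com"]}
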
  def _BuildInputData(self):
    """Build input data structures.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)

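  # in_text is the serialized form of in_data; Run() below hands it to the
  # iallocator script on the master node and keeps the raw reply in out_text.
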
  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

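  # A well-formed allocator reply, as checked below, is a dict carrying at
  # least the keys "success", "info" and "nodes", e.g. (illustrative):
  #   {"success": True, "info": "allocation successful",
  #    "nodes": ["node1.example.com", "node2.example.com"]}
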
  def _ValidateResult(self):
    """Process the allocator results.

    This will process and, if successful, save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode
    of the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result