Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 919ca415

History | View | Annotate | Download (315.8 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module implementing the master-side code."""
23

    
24
# pylint: disable-msg=W0201
25

    
26
# W0201 since most LU attributes are defined in CheckPrereq or similar
27
# functions
28

    
29
import os
30
import os.path
31
import time
32
import re
33
import platform
34
import logging
35
import copy
36

    
37
from ganeti import ssh
38
from ganeti import utils
39
from ganeti import errors
40
from ganeti import hypervisor
41
from ganeti import locking
42
from ganeti import constants
43
from ganeti import objects
44
from ganeti import serializer
45
from ganeti import ssconf
46

    
47

    
48
class LogicalUnit(object):
49
  """Logical Unit base class.
50

51
  Subclasses must follow these rules:
52
    - implement ExpandNames
53
    - implement CheckPrereq (except when tasklets are used)
54
    - implement Exec (except when tasklets are used)
55
    - implement BuildHooksEnv
56
    - redefine HPATH and HTYPE
57
    - optionally redefine their run requirements:
58
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
59

60
  Note that all commands require root permissions.
61

62
  @ivar dry_run_result: the value (if any) that will be returned to the caller
63
      in dry-run mode (signalled by opcode dry_run parameter)
64

65
  """
66
  HPATH = None
67
  HTYPE = None
68
  _OP_REQP = []
69
  REQ_BGL = True
70

    
71
  def __init__(self, processor, op, context, rpc):
72
    """Constructor for LogicalUnit.
73

74
    This needs to be overridden in derived classes in order to check op
75
    validity.
76

77
    """
78
    self.proc = processor
79
    self.op = op
80
    self.cfg = context.cfg
81
    self.context = context
82
    self.rpc = rpc
83
    # Dicts used to declare locking needs to mcpu
84
    self.needed_locks = None
85
    self.acquired_locks = {}
86
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
87
    self.add_locks = {}
88
    self.remove_locks = {}
89
    # Used to force good behavior when calling helper functions
90
    self.recalculate_locks = {}
91
    self.__ssh = None
92
    # logging
93
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
94
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
95
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
96
    # support for dry-run
97
    self.dry_run_result = None
98
    # support for generic debug attribute
99
    if (not hasattr(self.op, "debug_level") or
100
        not isinstance(self.op.debug_level, int)):
101
      self.op.debug_level = 0
102

    
103
    # Tasklets
104
    self.tasklets = None
105

    
106
    for attr_name in self._OP_REQP:
107
      attr_val = getattr(op, attr_name, None)
108
      if attr_val is None:
109
        raise errors.OpPrereqError("Required parameter '%s' missing" %
110
                                   attr_name, errors.ECODE_INVAL)
111

    
112
    self.CheckArguments()
113

    
114
  def __GetSSH(self):
115
    """Returns the SshRunner object
116

117
    """
118
    if not self.__ssh:
119
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
120
    return self.__ssh
121

    
122
  ssh = property(fget=__GetSSH)
123

    
124
  def CheckArguments(self):
125
    """Check syntactic validity for the opcode arguments.
126

127
    This method is for doing a simple syntactic check and ensure
128
    validity of opcode parameters, without any cluster-related
129
    checks. While the same can be accomplished in ExpandNames and/or
130
    CheckPrereq, doing these separate is better because:
131

132
      - ExpandNames is left as as purely a lock-related function
133
      - CheckPrereq is run after we have acquired locks (and possible
134
        waited for them)
135

136
    The function is allowed to change the self.op attribute so that
137
    later methods can no longer worry about missing parameters.
138

139
    """
140
    pass
141

    
142
  def ExpandNames(self):
143
    """Expand names for this LU.
144

145
    This method is called before starting to execute the opcode, and it should
146
    update all the parameters of the opcode to their canonical form (e.g. a
147
    short node name must be fully expanded after this method has successfully
148
    completed). This way locking, hooks, logging, ecc. can work correctly.
149

150
    LUs which implement this method must also populate the self.needed_locks
151
    member, as a dict with lock levels as keys, and a list of needed lock names
152
    as values. Rules:
153

154
      - use an empty dict if you don't need any lock
155
      - if you don't need any lock at a particular level omit that level
156
      - don't put anything for the BGL level
157
      - if you want all locks at a level use locking.ALL_SET as a value
158

159
    If you need to share locks (rather than acquire them exclusively) at one
160
    level you can modify self.share_locks, setting a true value (usually 1) for
161
    that level. By default locks are not shared.
162

163
    This function can also define a list of tasklets, which then will be
164
    executed in order instead of the usual LU-level CheckPrereq and Exec
165
    functions, if those are not defined by the LU.
166

167
    Examples::
168

169
      # Acquire all nodes and one instance
170
      self.needed_locks = {
171
        locking.LEVEL_NODE: locking.ALL_SET,
172
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
173
      }
174
      # Acquire just two nodes
175
      self.needed_locks = {
176
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
177
      }
178
      # Acquire no locks
179
      self.needed_locks = {} # No, you can't leave it to the default value None
180

181
    """
182
    # The implementation of this method is mandatory only if the new LU is
183
    # concurrent, so that old LUs don't need to be changed all at the same
184
    # time.
185
    if self.REQ_BGL:
186
      self.needed_locks = {} # Exclusive LUs don't need locks.
187
    else:
188
      raise NotImplementedError
189

    
190
  def DeclareLocks(self, level):
191
    """Declare LU locking needs for a level
192

193
    While most LUs can just declare their locking needs at ExpandNames time,
194
    sometimes there's the need to calculate some locks after having acquired
195
    the ones before. This function is called just before acquiring locks at a
196
    particular level, but after acquiring the ones at lower levels, and permits
197
    such calculations. It can be used to modify self.needed_locks, and by
198
    default it does nothing.
199

200
    This function is only called if you have something already set in
201
    self.needed_locks for the level.
202

203
    @param level: Locking level which is going to be locked
204
    @type level: member of ganeti.locking.LEVELS
205

206
    """
207

    
208
  def CheckPrereq(self):
209
    """Check prerequisites for this LU.
210

211
    This method should check that the prerequisites for the execution
212
    of this LU are fulfilled. It can do internode communication, but
213
    it should be idempotent - no cluster or system changes are
214
    allowed.
215

216
    The method should raise errors.OpPrereqError in case something is
217
    not fulfilled. Its return value is ignored.
218

219
    This method should also update all the parameters of the opcode to
220
    their canonical form if it hasn't been done by ExpandNames before.
221

222
    """
223
    if self.tasklets is not None:
224
      for (idx, tl) in enumerate(self.tasklets):
225
        logging.debug("Checking prerequisites for tasklet %s/%s",
226
                      idx + 1, len(self.tasklets))
227
        tl.CheckPrereq()
228
    else:
229
      raise NotImplementedError
230

    
231
  def Exec(self, feedback_fn):
232
    """Execute the LU.
233

234
    This method should implement the actual work. It should raise
235
    errors.OpExecError for failures that are somewhat dealt with in
236
    code, or expected.
237

238
    """
239
    if self.tasklets is not None:
240
      for (idx, tl) in enumerate(self.tasklets):
241
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
242
        tl.Exec(feedback_fn)
243
    else:
244
      raise NotImplementedError
245

    
246
  def BuildHooksEnv(self):
247
    """Build hooks environment for this LU.
248

249
    This method should return a three-node tuple consisting of: a dict
250
    containing the environment that will be used for running the
251
    specific hook for this LU, a list of node names on which the hook
252
    should run before the execution, and a list of node names on which
253
    the hook should run after the execution.
254

255
    The keys of the dict must not have 'GANETI_' prefixed as this will
256
    be handled in the hooks runner. Also note additional keys will be
257
    added by the hooks runner. If the LU doesn't define any
258
    environment, an empty dict (and not None) should be returned.
259

260
    No nodes should be returned as an empty list (and not None).
261

262
    Note that if the HPATH for a LU class is None, this function will
263
    not be called.
264

265
    """
266
    raise NotImplementedError
267

    
268
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
269
    """Notify the LU about the results of its hooks.
270

271
    This method is called every time a hooks phase is executed, and notifies
272
    the Logical Unit about the hooks' result. The LU can then use it to alter
273
    its result based on the hooks.  By default the method does nothing and the
274
    previous result is passed back unchanged but any LU can define it if it
275
    wants to use the local cluster hook-scripts somehow.
276

277
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
278
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
279
    @param hook_results: the results of the multi-node hooks rpc call
280
    @param feedback_fn: function used send feedback back to the caller
281
    @param lu_result: the previous Exec result this LU had, or None
282
        in the PRE phase
283
    @return: the new Exec result, based on the previous result
284
        and hook results
285

286
    """
287
    # API must be kept, thus we ignore the unused argument and could
288
    # be a function warnings
289
    # pylint: disable-msg=W0613,R0201
290
    return lu_result
291

    
292
  def _ExpandAndLockInstance(self):
293
    """Helper function to expand and lock an instance.
294

295
    Many LUs that work on an instance take its name in self.op.instance_name
296
    and need to expand it and then declare the expanded name for locking. This
297
    function does it, and then updates self.op.instance_name to the expanded
298
    name. It also initializes needed_locks as a dict, if this hasn't been done
299
    before.
300

301
    """
302
    if self.needed_locks is None:
303
      self.needed_locks = {}
304
    else:
305
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
306
        "_ExpandAndLockInstance called with instance-level locks set"
307
    self.op.instance_name = _ExpandInstanceName(self.cfg,
308
                                                self.op.instance_name)
309
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
310

    
311
  def _LockInstancesNodes(self, primary_only=False):
312
    """Helper function to declare instances' nodes for locking.
313

314
    This function should be called after locking one or more instances to lock
315
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
316
    with all primary or secondary nodes for instances already locked and
317
    present in self.needed_locks[locking.LEVEL_INSTANCE].
318

319
    It should be called from DeclareLocks, and for safety only works if
320
    self.recalculate_locks[locking.LEVEL_NODE] is set.
321

322
    In the future it may grow parameters to just lock some instance's nodes, or
323
    to just lock primaries or secondary nodes, if needed.
324

325
    If should be called in DeclareLocks in a way similar to::
326

327
      if level == locking.LEVEL_NODE:
328
        self._LockInstancesNodes()
329

330
    @type primary_only: boolean
331
    @param primary_only: only lock primary nodes of locked instances
332

333
    """
334
    assert locking.LEVEL_NODE in self.recalculate_locks, \
335
      "_LockInstancesNodes helper function called with no nodes to recalculate"
336

    
337
    # TODO: check if we're really been called with the instance locks held
338

    
339
    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
340
    # future we might want to have different behaviors depending on the value
341
    # of self.recalculate_locks[locking.LEVEL_NODE]
342
    wanted_nodes = []
343
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
344
      instance = self.context.cfg.GetInstanceInfo(instance_name)
345
      wanted_nodes.append(instance.primary_node)
346
      if not primary_only:
347
        wanted_nodes.extend(instance.secondary_nodes)
348

    
349
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
350
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
351
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
352
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
353

    
354
    del self.recalculate_locks[locking.LEVEL_NODE]
355

    
356

    
357
class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
358
  """Simple LU which runs no hooks.
359

360
  This LU is intended as a parent for other LogicalUnits which will
361
  run no hooks, in order to reduce duplicate code.
362

363
  """
364
  HPATH = None
365
  HTYPE = None
366

    
367
  def BuildHooksEnv(self):
368
    """Empty BuildHooksEnv for NoHooksLu.
369

370
    This just raises an error.
371

372
    """
373
    assert False, "BuildHooksEnv called for NoHooksLUs"
374

    
375

    
376
class Tasklet:
377
  """Tasklet base class.
378

379
  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
380
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
381
  tasklets know nothing about locks.
382

383
  Subclasses must follow these rules:
384
    - Implement CheckPrereq
385
    - Implement Exec
386

387
  """
388
  def __init__(self, lu):
389
    self.lu = lu
390

    
391
    # Shortcuts
392
    self.cfg = lu.cfg
393
    self.rpc = lu.rpc
394

    
395
  def CheckPrereq(self):
396
    """Check prerequisites for this tasklets.
397

398
    This method should check whether the prerequisites for the execution of
399
    this tasklet are fulfilled. It can do internode communication, but it
400
    should be idempotent - no cluster or system changes are allowed.
401

402
    The method should raise errors.OpPrereqError in case something is not
403
    fulfilled. Its return value is ignored.
404

405
    This method should also update all parameters to their canonical form if it
406
    hasn't been done before.
407

408
    """
409
    raise NotImplementedError
410

    
411
  def Exec(self, feedback_fn):
412
    """Execute the tasklet.
413

414
    This method should implement the actual work. It should raise
415
    errors.OpExecError for failures that are somewhat dealt with in code, or
416
    expected.
417

418
    """
419
    raise NotImplementedError
420

    
421

    
422
def _GetWantedNodes(lu, nodes):
423
  """Returns list of checked and expanded node names.
424

425
  @type lu: L{LogicalUnit}
426
  @param lu: the logical unit on whose behalf we execute
427
  @type nodes: list
428
  @param nodes: list of node names or None for all nodes
429
  @rtype: list
430
  @return: the list of nodes, sorted
431
  @raise errors.ProgrammerError: if the nodes parameter is wrong type
432

433
  """
434
  if not isinstance(nodes, list):
435
    raise errors.OpPrereqError("Invalid argument type 'nodes'",
436
                               errors.ECODE_INVAL)
437

    
438
  if not nodes:
439
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
440
      " non-empty list of nodes whose name is to be expanded.")
441

    
442
  wanted = [_ExpandNodeName(lu.cfg, name) for name in nodes]
443
  return utils.NiceSort(wanted)
444

    
445

    
446
def _GetWantedInstances(lu, instances):
447
  """Returns list of checked and expanded instance names.
448

449
  @type lu: L{LogicalUnit}
450
  @param lu: the logical unit on whose behalf we execute
451
  @type instances: list
452
  @param instances: list of instance names or None for all instances
453
  @rtype: list
454
  @return: the list of instances, sorted
455
  @raise errors.OpPrereqError: if the instances parameter is wrong type
456
  @raise errors.OpPrereqError: if any of the passed instances is not found
457

458
  """
459
  if not isinstance(instances, list):
460
    raise errors.OpPrereqError("Invalid argument type 'instances'",
461
                               errors.ECODE_INVAL)
462

    
463
  if instances:
464
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
465
  else:
466
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
467
  return wanted
468

    
469

    
470
def _CheckOutputFields(static, dynamic, selected):
471
  """Checks whether all selected fields are valid.
472

473
  @type static: L{utils.FieldSet}
474
  @param static: static fields set
475
  @type dynamic: L{utils.FieldSet}
476
  @param dynamic: dynamic fields set
477

478
  """
479
  f = utils.FieldSet()
480
  f.Extend(static)
481
  f.Extend(dynamic)
482

    
483
  delta = f.NonMatching(selected)
484
  if delta:
485
    raise errors.OpPrereqError("Unknown output fields selected: %s"
486
                               % ",".join(delta), errors.ECODE_INVAL)
487

    
488

    
489
def _CheckBooleanOpField(op, name):
490
  """Validates boolean opcode parameters.
491

492
  This will ensure that an opcode parameter is either a boolean value,
493
  or None (but that it always exists).
494

495
  """
496
  val = getattr(op, name, None)
497
  if not (val is None or isinstance(val, bool)):
498
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
499
                               (name, str(val)), errors.ECODE_INVAL)
500
  setattr(op, name, val)
501

    
502

    
503
def _CheckGlobalHvParams(params):
504
  """Validates that given hypervisor params are not global ones.
505

506
  This will ensure that instances don't get customised versions of
507
  global params.
508

509
  """
510
  used_globals = constants.HVC_GLOBALS.intersection(params)
511
  if used_globals:
512
    msg = ("The following hypervisor parameters are global and cannot"
513
           " be customized at instance level, please modify them at"
514
           " cluster level: %s" % utils.CommaJoin(used_globals))
515
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
516

    
517

    
518
def _CheckNodeOnline(lu, node):
519
  """Ensure that a given node is online.
520

521
  @param lu: the LU on behalf of which we make the check
522
  @param node: the node to check
523
  @raise errors.OpPrereqError: if the node is offline
524

525
  """
526
  if lu.cfg.GetNodeInfo(node).offline:
527
    raise errors.OpPrereqError("Can't use offline node %s" % node,
528
                               errors.ECODE_INVAL)
529

    
530

    
531
def _CheckNodeNotDrained(lu, node):
532
  """Ensure that a given node is not drained.
533

534
  @param lu: the LU on behalf of which we make the check
535
  @param node: the node to check
536
  @raise errors.OpPrereqError: if the node is drained
537

538
  """
539
  if lu.cfg.GetNodeInfo(node).drained:
540
    raise errors.OpPrereqError("Can't use drained node %s" % node,
541
                               errors.ECODE_INVAL)
542

    
543

    
544
def _ExpandItemName(fn, name, kind):
545
  """Expand an item name.
546

547
  @param fn: the function to use for expansion
548
  @param name: requested item name
549
  @param kind: text description ('Node' or 'Instance')
550
  @return: the resolved (full) name
551
  @raise errors.OpPrereqError: if the item is not found
552

553
  """
554
  full_name = fn(name)
555
  if full_name is None:
556
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
557
                               errors.ECODE_NOENT)
558
  return full_name
559

    
560

    
561
def _ExpandNodeName(cfg, name):
562
  """Wrapper over L{_ExpandItemName} for nodes."""
563
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
564

    
565

    
566
def _ExpandInstanceName(cfg, name):
567
  """Wrapper over L{_ExpandItemName} for instance."""
568
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
569

    
570

    
571
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
572
                          memory, vcpus, nics, disk_template, disks,
573
                          bep, hvp, hypervisor_name):
574
  """Builds instance related env variables for hooks
575

576
  This builds the hook environment from individual variables.
577

578
  @type name: string
579
  @param name: the name of the instance
580
  @type primary_node: string
581
  @param primary_node: the name of the instance's primary node
582
  @type secondary_nodes: list
583
  @param secondary_nodes: list of secondary nodes as strings
584
  @type os_type: string
585
  @param os_type: the name of the instance's OS
586
  @type status: boolean
587
  @param status: the should_run status of the instance
588
  @type memory: string
589
  @param memory: the memory size of the instance
590
  @type vcpus: string
591
  @param vcpus: the count of VCPUs the instance has
592
  @type nics: list
593
  @param nics: list of tuples (ip, mac, mode, link) representing
594
      the NICs the instance has
595
  @type disk_template: string
596
  @param disk_template: the disk template of the instance
597
  @type disks: list
598
  @param disks: the list of (size, mode) pairs
599
  @type bep: dict
600
  @param bep: the backend parameters for the instance
601
  @type hvp: dict
602
  @param hvp: the hypervisor parameters for the instance
603
  @type hypervisor_name: string
604
  @param hypervisor_name: the hypervisor for the instance
605
  @rtype: dict
606
  @return: the hook environment for this instance
607

608
  """
609
  if status:
610
    str_status = "up"
611
  else:
612
    str_status = "down"
613
  env = {
614
    "OP_TARGET": name,
615
    "INSTANCE_NAME": name,
616
    "INSTANCE_PRIMARY": primary_node,
617
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
618
    "INSTANCE_OS_TYPE": os_type,
619
    "INSTANCE_STATUS": str_status,
620
    "INSTANCE_MEMORY": memory,
621
    "INSTANCE_VCPUS": vcpus,
622
    "INSTANCE_DISK_TEMPLATE": disk_template,
623
    "INSTANCE_HYPERVISOR": hypervisor_name,
624
  }
625

    
626
  if nics:
627
    nic_count = len(nics)
628
    for idx, (ip, mac, mode, link) in enumerate(nics):
629
      if ip is None:
630
        ip = ""
631
      env["INSTANCE_NIC%d_IP" % idx] = ip
632
      env["INSTANCE_NIC%d_MAC" % idx] = mac
633
      env["INSTANCE_NIC%d_MODE" % idx] = mode
634
      env["INSTANCE_NIC%d_LINK" % idx] = link
635
      if mode == constants.NIC_MODE_BRIDGED:
636
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
637
  else:
638
    nic_count = 0
639

    
640
  env["INSTANCE_NIC_COUNT"] = nic_count
641

    
642
  if disks:
643
    disk_count = len(disks)
644
    for idx, (size, mode) in enumerate(disks):
645
      env["INSTANCE_DISK%d_SIZE" % idx] = size
646
      env["INSTANCE_DISK%d_MODE" % idx] = mode
647
  else:
648
    disk_count = 0
649

    
650
  env["INSTANCE_DISK_COUNT"] = disk_count
651

    
652
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
653
    for key, value in source.items():
654
      env["INSTANCE_%s_%s" % (kind, key)] = value
655

    
656
  return env
657

    
658

    
659
def _NICListToTuple(lu, nics):
660
  """Build a list of nic information tuples.
661

662
  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
663
  value in LUQueryInstanceData.
664

665
  @type lu:  L{LogicalUnit}
666
  @param lu: the logical unit on whose behalf we execute
667
  @type nics: list of L{objects.NIC}
668
  @param nics: list of nics to convert to hooks tuples
669

670
  """
671
  hooks_nics = []
672
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
673
  for nic in nics:
674
    ip = nic.ip
675
    mac = nic.mac
676
    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
677
    mode = filled_params[constants.NIC_MODE]
678
    link = filled_params[constants.NIC_LINK]
679
    hooks_nics.append((ip, mac, mode, link))
680
  return hooks_nics
681

    
682

    
683
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
684
  """Builds instance related env variables for hooks from an object.
685

686
  @type lu: L{LogicalUnit}
687
  @param lu: the logical unit on whose behalf we execute
688
  @type instance: L{objects.Instance}
689
  @param instance: the instance for which we should build the
690
      environment
691
  @type override: dict
692
  @param override: dictionary with key/values that will override
693
      our values
694
  @rtype: dict
695
  @return: the hook environment dictionary
696

697
  """
698
  cluster = lu.cfg.GetClusterInfo()
699
  bep = cluster.FillBE(instance)
700
  hvp = cluster.FillHV(instance)
701
  args = {
702
    'name': instance.name,
703
    'primary_node': instance.primary_node,
704
    'secondary_nodes': instance.secondary_nodes,
705
    'os_type': instance.os,
706
    'status': instance.admin_up,
707
    'memory': bep[constants.BE_MEMORY],
708
    'vcpus': bep[constants.BE_VCPUS],
709
    'nics': _NICListToTuple(lu, instance.nics),
710
    'disk_template': instance.disk_template,
711
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
712
    'bep': bep,
713
    'hvp': hvp,
714
    'hypervisor_name': instance.hypervisor,
715
  }
716
  if override:
717
    args.update(override)
718
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142
719

    
720

    
721
def _AdjustCandidatePool(lu, exceptions):
722
  """Adjust the candidate pool after node operations.
723

724
  """
725
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
726
  if mod_list:
727
    lu.LogInfo("Promoted nodes to master candidate role: %s",
728
               utils.CommaJoin(node.name for node in mod_list))
729
    for name in mod_list:
730
      lu.context.ReaddNode(name)
731
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
732
  if mc_now > mc_max:
733
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
734
               (mc_now, mc_max))
735

    
736

    
737
def _DecideSelfPromotion(lu, exceptions=None):
738
  """Decide whether I should promote myself as a master candidate.
739

740
  """
741
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
742
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
743
  # the new node will increase mc_max with one, so:
744
  mc_should = min(mc_should + 1, cp_size)
745
  return mc_now < mc_should
746

    
747

    
748
def _CheckNicsBridgesExist(lu, target_nics, target_node,
749
                               profile=constants.PP_DEFAULT):
750
  """Check that the brigdes needed by a list of nics exist.
751

752
  """
753
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
754
  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
755
                for nic in target_nics]
756
  brlist = [params[constants.NIC_LINK] for params in paramslist
757
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
758
  if brlist:
759
    result = lu.rpc.call_bridges_exist(target_node, brlist)
760
    result.Raise("Error checking bridges on destination node '%s'" %
761
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
762

    
763

    
764
def _CheckInstanceBridgesExist(lu, instance, node=None):
765
  """Check that the brigdes needed by an instance exist.
766

767
  """
768
  if node is None:
769
    node = instance.primary_node
770
  _CheckNicsBridgesExist(lu, instance.nics, node)
771

    
772

    
773
def _CheckOSVariant(os_obj, name):
774
  """Check whether an OS name conforms to the os variants specification.
775

776
  @type os_obj: L{objects.OS}
777
  @param os_obj: OS object to check
778
  @type name: string
779
  @param name: OS name passed by the user, to check for validity
780

781
  """
782
  if not os_obj.supported_variants:
783
    return
784
  try:
785
    variant = name.split("+", 1)[1]
786
  except IndexError:
787
    raise errors.OpPrereqError("OS name must include a variant",
788
                               errors.ECODE_INVAL)
789

    
790
  if variant not in os_obj.supported_variants:
791
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
792

    
793

    
794
def _GetNodeInstancesInner(cfg, fn):
795
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
796

    
797

    
798
def _GetNodeInstances(cfg, node_name):
799
  """Returns a list of all primary and secondary instances on a node.
800

801
  """
802

    
803
  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
804

    
805

    
806
def _GetNodePrimaryInstances(cfg, node_name):
807
  """Returns primary instances on a node.
808

809
  """
810
  return _GetNodeInstancesInner(cfg,
811
                                lambda inst: node_name == inst.primary_node)
812

    
813

    
814
def _GetNodeSecondaryInstances(cfg, node_name):
815
  """Returns secondary instances on a node.
816

817
  """
818
  return _GetNodeInstancesInner(cfg,
819
                                lambda inst: node_name in inst.secondary_nodes)
820

    
821

    
822
def _GetStorageTypeArgs(cfg, storage_type):
823
  """Returns the arguments for a storage type.
824

825
  """
826
  # Special case for file storage
827
  if storage_type == constants.ST_FILE:
828
    # storage.FileStorage wants a list of storage directories
829
    return [[cfg.GetFileStorageDir()]]
830

    
831
  return []
832

    
833

    
834
def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
835
  faulty = []
836

    
837
  for dev in instance.disks:
838
    cfg.SetDiskID(dev, node_name)
839

    
840
  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
841
  result.Raise("Failed to get disk status from node %s" % node_name,
842
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
843

    
844
  for idx, bdev_status in enumerate(result.payload):
845
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
846
      faulty.append(idx)
847

    
848
  return faulty
849

    
850

    
851
class LUPostInitCluster(LogicalUnit):
852
  """Logical unit for running hooks after cluster initialization.
853

854
  """
855
  HPATH = "cluster-init"
856
  HTYPE = constants.HTYPE_CLUSTER
857
  _OP_REQP = []
858

    
859
  def BuildHooksEnv(self):
860
    """Build hooks env.
861

862
    """
863
    env = {"OP_TARGET": self.cfg.GetClusterName()}
864
    mn = self.cfg.GetMasterNode()
865
    return env, [], [mn]
866

    
867
  def CheckPrereq(self):
868
    """No prerequisites to check.
869

870
    """
871
    return True
872

    
873
  def Exec(self, feedback_fn):
874
    """Nothing to do.
875

876
    """
877
    return True
878

    
879

    
880
class LUDestroyCluster(LogicalUnit):
881
  """Logical unit for destroying the cluster.
882

883
  """
884
  HPATH = "cluster-destroy"
885
  HTYPE = constants.HTYPE_CLUSTER
886
  _OP_REQP = []
887

    
888
  def BuildHooksEnv(self):
889
    """Build hooks env.
890

891
    """
892
    env = {"OP_TARGET": self.cfg.GetClusterName()}
893
    return env, [], []
894

    
895
  def CheckPrereq(self):
896
    """Check prerequisites.
897

898
    This checks whether the cluster is empty.
899

900
    Any errors are signaled by raising errors.OpPrereqError.
901

902
    """
903
    master = self.cfg.GetMasterNode()
904

    
905
    nodelist = self.cfg.GetNodeList()
906
    if len(nodelist) != 1 or nodelist[0] != master:
907
      raise errors.OpPrereqError("There are still %d node(s) in"
908
                                 " this cluster." % (len(nodelist) - 1),
909
                                 errors.ECODE_INVAL)
910
    instancelist = self.cfg.GetInstanceList()
911
    if instancelist:
912
      raise errors.OpPrereqError("There are still %d instance(s) in"
913
                                 " this cluster." % len(instancelist),
914
                                 errors.ECODE_INVAL)
915

    
916
  def Exec(self, feedback_fn):
917
    """Destroys the cluster.
918

919
    """
920
    master = self.cfg.GetMasterNode()
921
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
922

    
923
    # Run post hooks on master node before it's removed
924
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
925
    try:
926
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
927
    except:
928
      # pylint: disable-msg=W0702
929
      self.LogWarning("Errors occurred running hooks on %s" % master)
930

    
931
    result = self.rpc.call_node_stop_master(master, False)
932
    result.Raise("Could not disable the master role")
933

    
934
    if modify_ssh_setup:
935
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
936
      utils.CreateBackup(priv_key)
937
      utils.CreateBackup(pub_key)
938

    
939
    return master
940

    
941

    
942
class LUVerifyCluster(LogicalUnit):
943
  """Verifies the cluster status.
944

945
  """
946
  HPATH = "cluster-verify"
947
  HTYPE = constants.HTYPE_CLUSTER
948
  _OP_REQP = ["skip_checks", "verbose", "error_codes", "debug_simulate_errors"]
949
  REQ_BGL = False
950

    
951
  TCLUSTER = "cluster"
952
  TNODE = "node"
953
  TINSTANCE = "instance"
954

    
955
  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
956
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
957
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
958
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
959
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
960
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
961
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
962
  ENODEDRBD = (TNODE, "ENODEDRBD")
963
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
964
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
965
  ENODEHV = (TNODE, "ENODEHV")
966
  ENODELVM = (TNODE, "ENODELVM")
967
  ENODEN1 = (TNODE, "ENODEN1")
968
  ENODENET = (TNODE, "ENODENET")
969
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
970
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
971
  ENODERPC = (TNODE, "ENODERPC")
972
  ENODESSH = (TNODE, "ENODESSH")
973
  ENODEVERSION = (TNODE, "ENODEVERSION")
974
  ENODESETUP = (TNODE, "ENODESETUP")
975
  ENODETIME = (TNODE, "ENODETIME")
976

    
977
  ETYPE_FIELD = "code"
978
  ETYPE_ERROR = "ERROR"
979
  ETYPE_WARNING = "WARNING"
980

    
981
  def ExpandNames(self):
982
    self.needed_locks = {
983
      locking.LEVEL_NODE: locking.ALL_SET,
984
      locking.LEVEL_INSTANCE: locking.ALL_SET,
985
    }
986
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
987

    
988
  def _Error(self, ecode, item, msg, *args, **kwargs):
989
    """Format an error message.
990

991
    Based on the opcode's error_codes parameter, either format a
992
    parseable error code, or a simpler error string.
993

994
    This must be called only from Exec and functions called from Exec.
995

996
    """
997
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
998
    itype, etxt = ecode
999
    # first complete the msg
1000
    if args:
1001
      msg = msg % args
1002
    # then format the whole message
1003
    if self.op.error_codes:
1004
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1005
    else:
1006
      if item:
1007
        item = " " + item
1008
      else:
1009
        item = ""
1010
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1011
    # and finally report it via the feedback_fn
1012
    self._feedback_fn("  - %s" % msg)
1013

    
1014
  def _ErrorIf(self, cond, *args, **kwargs):
1015
    """Log an error message if the passed condition is True.
1016

1017
    """
1018
    cond = bool(cond) or self.op.debug_simulate_errors
1019
    if cond:
1020
      self._Error(*args, **kwargs)
1021
    # do not mark the operation as failed for WARN cases only
1022
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1023
      self.bad = self.bad or cond
1024

    
1025
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
1026
                  node_result, master_files, drbd_map, vg_name):
1027
    """Run multiple tests against a node.
1028

1029
    Test list:
1030

1031
      - compares ganeti version
1032
      - checks vg existence and size > 20G
1033
      - checks config file checksum
1034
      - checks ssh to other nodes
1035

1036
    @type nodeinfo: L{objects.Node}
1037
    @param nodeinfo: the node to check
1038
    @param file_list: required list of files
1039
    @param local_cksum: dictionary of local files and their checksums
1040
    @param node_result: the results from the node
1041
    @param master_files: list of files that only masters should have
1042
    @param drbd_map: the useddrbd minors for this node, in
1043
        form of minor: (instance, must_exist) which correspond to instances
1044
        and their running status
1045
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())
1046

1047
    """
1048
    node = nodeinfo.name
1049
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1050

    
1051
    # main result, node_result should be a non-empty dict
1052
    test = not node_result or not isinstance(node_result, dict)
1053
    _ErrorIf(test, self.ENODERPC, node,
1054
                  "unable to verify node: no data returned")
1055
    if test:
1056
      return
1057

    
1058
    # compares ganeti version
1059
    local_version = constants.PROTOCOL_VERSION
1060
    remote_version = node_result.get('version', None)
1061
    test = not (remote_version and
1062
                isinstance(remote_version, (list, tuple)) and
1063
                len(remote_version) == 2)
1064
    _ErrorIf(test, self.ENODERPC, node,
1065
             "connection to node returned invalid data")
1066
    if test:
1067
      return
1068

    
1069
    test = local_version != remote_version[0]
1070
    _ErrorIf(test, self.ENODEVERSION, node,
1071
             "incompatible protocol versions: master %s,"
1072
             " node %s", local_version, remote_version[0])
1073
    if test:
1074
      return
1075

    
1076
    # node seems compatible, we can actually try to look into its results
1077

    
1078
    # full package version
1079
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1080
                  self.ENODEVERSION, node,
1081
                  "software version mismatch: master %s, node %s",
1082
                  constants.RELEASE_VERSION, remote_version[1],
1083
                  code=self.ETYPE_WARNING)
1084

    
1085
    # checks vg existence and size > 20G
1086
    if vg_name is not None:
1087
      vglist = node_result.get(constants.NV_VGLIST, None)
1088
      test = not vglist
1089
      _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1090
      if not test:
1091
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1092
                                              constants.MIN_VG_SIZE)
1093
        _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1094

    
1095
    # checks config file checksum
1096

    
1097
    remote_cksum = node_result.get(constants.NV_FILELIST, None)
1098
    test = not isinstance(remote_cksum, dict)
1099
    _ErrorIf(test, self.ENODEFILECHECK, node,
1100
             "node hasn't returned file checksum data")
1101
    if not test:
1102
      for file_name in file_list:
1103
        node_is_mc = nodeinfo.master_candidate
1104
        must_have = (file_name not in master_files) or node_is_mc
1105
        # missing
1106
        test1 = file_name not in remote_cksum
1107
        # invalid checksum
1108
        test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
1109
        # existing and good
1110
        test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
1111
        _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
1112
                 "file '%s' missing", file_name)
1113
        _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
1114
                 "file '%s' has wrong checksum", file_name)
1115
        # not candidate and this is not a must-have file
1116
        _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
1117
                 "file '%s' should not exist on non master"
1118
                 " candidates (and the file is outdated)", file_name)
1119
        # all good, except non-master/non-must have combination
1120
        _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
1121
                 "file '%s' should not exist"
1122
                 " on non master candidates", file_name)
1123

    
1124
    # checks ssh to any
1125

    
1126
    test = constants.NV_NODELIST not in node_result
1127
    _ErrorIf(test, self.ENODESSH, node,
1128
             "node hasn't returned node ssh connectivity data")
1129
    if not test:
1130
      if node_result[constants.NV_NODELIST]:
1131
        for a_node, a_msg in node_result[constants.NV_NODELIST].items():
1132
          _ErrorIf(True, self.ENODESSH, node,
1133
                   "ssh communication with node '%s': %s", a_node, a_msg)
1134

    
1135
    test = constants.NV_NODENETTEST not in node_result
1136
    _ErrorIf(test, self.ENODENET, node,
1137
             "node hasn't returned node tcp connectivity data")
1138
    if not test:
1139
      if node_result[constants.NV_NODENETTEST]:
1140
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
1141
        for anode in nlist:
1142
          _ErrorIf(True, self.ENODENET, node,
1143
                   "tcp communication with node '%s': %s",
1144
                   anode, node_result[constants.NV_NODENETTEST][anode])
1145

    
1146
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
1147
    if isinstance(hyp_result, dict):
1148
      for hv_name, hv_result in hyp_result.iteritems():
1149
        test = hv_result is not None
1150
        _ErrorIf(test, self.ENODEHV, node,
1151
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1152

    
1153
    # check used drbd list
1154
    if vg_name is not None:
1155
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
1156
      test = not isinstance(used_minors, (tuple, list))
1157
      _ErrorIf(test, self.ENODEDRBD, node,
1158
               "cannot parse drbd status file: %s", str(used_minors))
1159
      if not test:
1160
        for minor, (iname, must_exist) in drbd_map.items():
1161
          test = minor not in used_minors and must_exist
1162
          _ErrorIf(test, self.ENODEDRBD, node,
1163
                   "drbd minor %d of instance %s is not active",
1164
                   minor, iname)
1165
        for minor in used_minors:
1166
          test = minor not in drbd_map
1167
          _ErrorIf(test, self.ENODEDRBD, node,
1168
                   "unallocated drbd minor %d is in use", minor)
1169
    test = node_result.get(constants.NV_NODESETUP,
1170
                           ["Missing NODESETUP results"])
1171
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1172
             "; ".join(test))
1173

    
1174
    # check pv names
1175
    if vg_name is not None:
1176
      pvlist = node_result.get(constants.NV_PVLIST, None)
1177
      test = pvlist is None
1178
      _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
1179
      if not test:
1180
        # check that ':' is not present in PV names, since it's a
1181
        # special character for lvcreate (denotes the range of PEs to
1182
        # use on the PV)
1183
        for _, pvname, owner_vg in pvlist:
1184
          test = ":" in pvname
1185
          _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
1186
                   " '%s' of VG '%s'", pvname, owner_vg)
1187

    
1188
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
1189
                      node_instance, n_offline):
1190
    """Verify an instance.
1191

1192
    This function checks to see if the required block devices are
1193
    available on the instance's node.
1194

1195
    """
1196
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1197
    node_current = instanceconfig.primary_node
1198

    
1199
    node_vol_should = {}
1200
    instanceconfig.MapLVsByNode(node_vol_should)
1201

    
1202
    for node in node_vol_should:
1203
      if node in n_offline:
1204
        # ignore missing volumes on offline nodes
1205
        continue
1206
      for volume in node_vol_should[node]:
1207
        test = node not in node_vol_is or volume not in node_vol_is[node]
1208
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
1209
                 "volume %s missing on node %s", volume, node)
1210

    
1211
    if instanceconfig.admin_up:
1212
      test = ((node_current not in node_instance or
1213
               not instance in node_instance[node_current]) and
1214
              node_current not in n_offline)
1215
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
1216
               "instance not running on its primary node %s",
1217
               node_current)
1218

    
1219
    for node in node_instance:
1220
      if (not node == node_current):
1221
        test = instance in node_instance[node]
1222
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
1223
                 "instance should not run on node %s", node)
1224

    
1225
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is):
1226
    """Verify if there are any unknown volumes in the cluster.
1227

1228
    The .os, .swap and backup volumes are ignored. All other volumes are
1229
    reported as unknown.
1230

1231
    """
1232
    for node in node_vol_is:
1233
      for volume in node_vol_is[node]:
1234
        test = (node not in node_vol_should or
1235
                volume not in node_vol_should[node])
1236
        self._ErrorIf(test, self.ENODEORPHANLV, node,
1237
                      "volume %s is unknown", volume)
1238

    
1239
  def _VerifyOrphanInstances(self, instancelist, node_instance):
1240
    """Verify the list of running instances.
1241

1242
    This checks what instances are running but unknown to the cluster.
1243

1244
    """
1245
    for node in node_instance:
1246
      for o_inst in node_instance[node]:
1247
        test = o_inst not in instancelist
1248
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
1249
                      "instance %s on node %s should not exist", o_inst, node)
1250

    
1251
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg):
1252
    """Verify N+1 Memory Resilience.
1253

1254
    Check that if one single node dies we can still start all the instances it
1255
    was primary for.
1256

1257
    """
1258
    for node, nodeinfo in node_info.iteritems():
1259
      # This code checks that every node which is now listed as secondary has
1260
      # enough memory to host all instances it is supposed to should a single
1261
      # other node in the cluster fail.
1262
      # FIXME: not ready for failover to an arbitrary node
1263
      # FIXME: does not support file-backed instances
1264
      # WARNING: we currently take into account down instances as well as up
1265
      # ones, considering that even if they're down someone might want to start
1266
      # them even in the event of a node failure.
1267
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
1268
        needed_mem = 0
1269
        for instance in instances:
1270
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
1271
          if bep[constants.BE_AUTO_BALANCE]:
1272
            needed_mem += bep[constants.BE_MEMORY]
1273
        test = nodeinfo['mfree'] < needed_mem
1274
        self._ErrorIf(test, self.ENODEN1, node,
1275
                      "not enough memory on to accommodate"
1276
                      " failovers should peer node %s fail", prinode)
1277

    
1278
  def CheckPrereq(self):
1279
    """Check prerequisites.
1280

1281
    Transform the list of checks we're going to skip into a set and check that
1282
    all its members are valid.
1283

1284
    """
1285
    self.skip_set = frozenset(self.op.skip_checks)
1286
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
1287
      raise errors.OpPrereqError("Invalid checks to be skipped specified",
1288
                                 errors.ECODE_INVAL)
1289

    
1290
  def BuildHooksEnv(self):
1291
    """Build hooks env.
1292

1293
    Cluster-Verify hooks just ran in the post phase and their failure makes
1294
    the output be logged in the verify output and the verification to fail.
1295

1296
    """
1297
    all_nodes = self.cfg.GetNodeList()
1298
    env = {
1299
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
1300
      }
1301
    for node in self.cfg.GetAllNodesInfo().values():
1302
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
1303

    
1304
    return env, [], all_nodes
1305

    
1306
  def Exec(self, feedback_fn):
1307
    """Verify integrity of cluster, performing various test on nodes.
1308

1309
    """
1310
    self.bad = False
1311
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1312
    verbose = self.op.verbose
1313
    self._feedback_fn = feedback_fn
1314
    feedback_fn("* Verifying global settings")
1315
    for msg in self.cfg.VerifyConfig():
1316
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)
1317

    
1318
    vg_name = self.cfg.GetVGName()
1319
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
1320
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
1321
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
1322
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
1323
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
1324
                        for iname in instancelist)
1325
    i_non_redundant = [] # Non redundant instances
1326
    i_non_a_balanced = [] # Non auto-balanced instances
1327
    n_offline = [] # List of offline nodes
1328
    n_drained = [] # List of nodes being drained
1329
    node_volume = {}
1330
    node_instance = {}
1331
    node_info = {}
1332
    instance_cfg = {}
1333

    
1334
    # FIXME: verify OS list
1335
    # do local checksums
1336
    master_files = [constants.CLUSTER_CONF_FILE]
1337

    
1338
    file_names = ssconf.SimpleStore().GetFileList()
1339
    file_names.append(constants.SSL_CERT_FILE)
1340
    file_names.append(constants.RAPI_CERT_FILE)
1341
    file_names.extend(master_files)
1342

    
1343
    local_checksums = utils.FingerprintFiles(file_names)
1344

    
1345
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
1346
    node_verify_param = {
1347
      constants.NV_FILELIST: file_names,
1348
      constants.NV_NODELIST: [node.name for node in nodeinfo
1349
                              if not node.offline],
1350
      constants.NV_HYPERVISOR: hypervisors,
1351
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
1352
                                  node.secondary_ip) for node in nodeinfo
1353
                                 if not node.offline],
1354
      constants.NV_INSTANCELIST: hypervisors,
1355
      constants.NV_VERSION: None,
1356
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
1357
      constants.NV_NODESETUP: None,
1358
      constants.NV_TIME: None,
1359
      }
1360

    
1361
    if vg_name is not None:
1362
      node_verify_param[constants.NV_VGLIST] = None
1363
      node_verify_param[constants.NV_LVLIST] = vg_name
1364
      node_verify_param[constants.NV_PVLIST] = [vg_name]
1365
      node_verify_param[constants.NV_DRBDLIST] = None

    # Due to the way our RPC system works, exact response times cannot be
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
    # time before and after executing the request, we can at least have a time
    # window.
    nvinfo_starttime = time.time()
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())
    nvinfo_endtime = time.time()

    cluster = self.cfg.GetClusterInfo()
    master_node = self.cfg.GetMasterNode()
    all_drbd_map = self.cfg.ComputeDRBDMap()

    feedback_fn("* Verifying node status")
    for node_i in nodeinfo:
      node = node_i.name

      if node_i.offline:
        if verbose:
          feedback_fn("* Skipping offline node %s" % (node,))
        n_offline.append(node)
        continue

      if node == master_node:
        ntype = "master"
      elif node_i.master_candidate:
        ntype = "master candidate"
      elif node_i.drained:
        ntype = "drained"
        n_drained.append(node)
      else:
        ntype = "regular"
      if verbose:
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))

      msg = all_nvinfo[node].fail_msg
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
      if msg:
        continue

      nresult = all_nvinfo[node].payload
      node_drbd = {}
      for minor, instance in all_drbd_map[node].items():
        test = instance not in instanceinfo
        _ErrorIf(test, self.ECLUSTERCFG, None,
                 "ghost instance '%s' in temporary DRBD map", instance)
          # ghost instance should not be running, but otherwise we
          # don't give double warnings (both ghost instance and
          # unallocated minor in use)
        if test:
          node_drbd[minor] = (instance, False)
        else:
          instance = instanceinfo[instance]
          node_drbd[minor] = (instance.name, instance.admin_up)

      self._VerifyNode(node_i, file_names, local_checksums,
                       nresult, master_files, node_drbd, vg_name)

      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
      if vg_name is None:
        node_volume[node] = {}
      elif isinstance(lvdata, basestring):
        _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
                 utils.SafeEncode(lvdata))
        node_volume[node] = {}
      elif not isinstance(lvdata, dict):
        _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
        continue
      else:
        node_volume[node] = lvdata

      # node_instance
      idata = nresult.get(constants.NV_INSTANCELIST, None)
      test = not isinstance(idata, list)
      _ErrorIf(test, self.ENODEHV, node,
               "rpc call to node failed (instancelist)")
      if test:
        continue

      node_instance[node] = idata

      # node_info
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
      test = not isinstance(nodeinfo, dict)
      _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
      if test:
        continue

      # Node time
      ntime = nresult.get(constants.NV_TIME, None)
      try:
        ntime_merged = utils.MergeTime(ntime)
      except (ValueError, TypeError):
        _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")

      if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
        ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
      elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
        ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
      else:
        ntime_diff = None

      _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
               "Node time diverges by at least %s from master node time",
               ntime_diff)

      if ntime_diff is not None:
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
        # FIXME: devise a free space model for file based instances as well
        if vg_name is not None:
          test = (constants.NV_VGLIST not in nresult or
                  vg_name not in nresult[constants.NV_VGLIST])
          _ErrorIf(test, self.ENODELVM, node,
                   "node didn't return data for the volume group '%s'"
                   " - it is either missing or broken", vg_name)
          if test:
            continue
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
      except (ValueError, KeyError):
        _ErrorIf(True, self.ENODERPC, node,
                 "node returned invalid nodeinfo, check lvm/hypervisor")
        continue

    node_vol_should = {}

    feedback_fn("* Verifying instance status")
    for instance in instancelist:
      if verbose:
        feedback_fn("* Verifying instance %s" % instance)
      inst_config = instanceinfo[instance]
      self._VerifyInstance(instance, inst_config, node_volume,
                           node_instance, n_offline)
      inst_nodes_offline = []

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      _ErrorIf(pnode not in node_info and pnode not in n_offline,
               self.ENODERPC, pnode, "instance %s, connection to"
               " primary node failed", instance)
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)

      if pnode in n_offline:
        inst_nodes_offline.append(pnode)

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      _ErrorIf(len(inst_config.secondary_nodes) > 1,
               self.EINSTANCELAYOUT, instance,
               "instance has multiple secondary nodes", code="WARNING")

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        _ErrorIf(snode not in node_info and snode not in n_offline,
                 self.ENODERPC, snode,
                 "instance %s, connection to secondary node"
                 " failed", instance)

        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)

        if snode in n_offline:
          inst_nodes_offline.append(snode)

      # warn that the instance lives on offline nodes
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
               "instance lives on offline node(s) %s",
               utils.CommaJoin(inst_nodes_offline))

    feedback_fn("* Verifying orphan volumes")
    self._VerifyOrphanVolumes(node_vol_should, node_volume)

    feedback_fn("* Verifying remaining instances")
    self._VerifyOrphanInstances(instancelist, node_instance)

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      self._VerifyNPlusOneMemory(node_info, instance_cfg)

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    if n_offline:
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))

    if n_drained:
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))

    return not self.bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      assert hooks_results, "invalid result from hooks"

      for node_name in hooks_results:
        res = hooks_results[node_name]
        msg = res.fail_msg
        test = msg and not res.offline
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
                      "Communication failure in hooks execution: %s", msg)
        if res.offline or msg:
          # No need to investigate payload if node is offline or gave an error.
          # override manually lu_result here as _ErrorIf only
          # overrides self.bad
          lu_result = 1
          continue
        for script, hkr, output in res.payload:
          test = hkr == constants.HKR_FAIL
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
                        "Script %s failed, output:", script)
          if test:
            output = indent_re.sub('      ', output)
            feedback_fn("%s" % output)
            lu_result = 0

      return lu_result


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes)

    """
    result = res_nodes, res_instances, res_missing = {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_lv_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      node_res = node_lvs[node]
      if node_res.offline:
        continue
      msg = node_res.fail_msg
      if msg:
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
        res_nodes[node] = msg
        continue

      lvs = node_res.payload
      for lv_name, (_, _, lv_online) in lvs.items():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result


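# Illustrative sketch (hypothetical caller, not part of this module): the
# three-item tuple returned by LUVerifyDisks.Exec above unpacks roughly as
#   bad_nodes, need_activate, missing = lu.Exec(feedback_fn)
# where bad_nodes maps a node name to its RPC error message, need_activate
# lists instances whose LVs are offline, and missing maps an instance name
# to the (node, volume) pairs that could not be found.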
class LURepairDiskSizes(NoHooksLU):
  """Verifies the cluster disks sizes.

  """
  _OP_REQP = ["instances"]
  REQ_BGL = False

  def ExpandNames(self):
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'",
                                 errors.ECODE_INVAL)

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = _ExpandInstanceName(self.cfg, name)
        self.wanted_names.append(full_name)
      self.needed_locks = {
        locking.LEVEL_NODE: [],
        locking.LEVEL_INSTANCE: self.wanted_names,
        }
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    else:
      self.wanted_names = None
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: locking.ALL_SET,
        }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
      self._LockInstancesNodes(primary_only=True)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]

  def _EnsureChildSizes(self, disk):
    """Ensure children of the disk have the needed disk size.

    This is valid mainly for DRBD8 and fixes an issue where the
    children have smaller disk size.

    @param disk: an L{ganeti.objects.Disk} object

    """
    if disk.dev_type == constants.LD_DRBD8:
      assert disk.children, "Empty children for DRBD8?"
      fchild = disk.children[0]
      mismatch = fchild.size < disk.size
      if mismatch:
        self.LogInfo("Child disk has size %d, parent %d, fixing",
                     fchild.size, disk.size)
        fchild.size = disk.size

      # and we recurse on this child only, not on the metadev
      return self._EnsureChildSizes(fchild) or mismatch
    else:
      return False

  def Exec(self, feedback_fn):
    """Verify the size of cluster disks.

    """
    # TODO: check child disks too
    # TODO: check differences in size between primary/secondary nodes
    per_node_disks = {}
    for instance in self.wanted_instances:
      pnode = instance.primary_node
      if pnode not in per_node_disks:
        per_node_disks[pnode] = []
      for idx, disk in enumerate(instance.disks):
        per_node_disks[pnode].append((instance, idx, disk))

    changed = []
    for node, dskl in per_node_disks.items():
      newl = [v[2].Copy() for v in dskl]
      for dsk in newl:
        self.cfg.SetDiskID(dsk, node)
      result = self.rpc.call_blockdev_getsizes(node, newl)
      if result.fail_msg:
        self.LogWarning("Failure in blockdev_getsizes call to node"
                        " %s, ignoring", node)
        continue
      if len(result.data) != len(dskl):
        self.LogWarning("Invalid result from node %s, ignoring node results",
                        node)
        continue
      for ((instance, idx, disk), size) in zip(dskl, result.data):
        if size is None:
          self.LogWarning("Disk %d of instance %s did not return size"
                          " information, ignoring", idx, instance.name)
          continue
        if not isinstance(size, (int, long)):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " size information, ignoring", idx, instance.name)
          continue
        size = size >> 20
        if size != disk.size:
          self.LogInfo("Disk %d of instance %s has mismatched size,"
                       " correcting: recorded %d, actual %d", idx,
                       instance.name, disk.size, size)
          disk.size = size
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, size))
        if self._EnsureChildSizes(disk):
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, disk.size))
    return changed


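# Note on LURepairDiskSizes.Exec above: the sizes returned by the
# blockdev_getsizes RPC are reported in bytes, so the right-shift by 20
# converts them to mebibytes before comparing with disk.size, which the
# configuration stores in MiB (e.g. 10737418240 >> 20 == 10240).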
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    all_nodes = self.cfg.GetNodeList()
    return env, [mn], all_nodes

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.GetHostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed",
                                 errors.ECODE_INVAL)
    if new_ip != old_ip:
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip, errors.ECODE_NOTUNIQUE)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster, feedback_fn)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        msg = to_result.fail_msg
        if msg:
          msg = ("Copy of file %s to node %s failed: %s" %
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
          self.proc.LogWarning(msg)

    finally:
      result = self.rpc.call_node_start_master(master, False, False)
      msg = result.fail_msg
      if msg:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually: %s", msg)


def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether a LD_LV dev_type was found or not

  """
  if disk.children:
    for chdisk in disk.children:
      if _RecursiveCheckIfLVMBased(chdisk):
        return True
  return disk.dev_type == constants.LD_LV


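# Illustrative example for _RecursiveCheckIfLVMBased above (hypothetical
# disk layout): a DRBD8 disk whose children are two LD_LV devices yields
# True, because the recursion reaches a child with
# dev_type == constants.LD_LV; a purely file-based disk yields False.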
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters

    """
    if not hasattr(self.op, "candidate_pool_size"):
      self.op.candidate_pool_size = None
    if self.op.candidate_pool_size is not None:
      try:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
      except (ValueError, TypeError), err:
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                   str(err), errors.ECODE_INVAL)
      if self.op.candidate_pool_size < 1:
        raise errors.OpPrereqError("At least one master candidate needed",
                                   errors.ECODE_INVAL)

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist",
                                       errors.ECODE_INVAL)

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        msg = vglist[node].fail_msg
        if msg:
          # ignoring down node
          self.LogWarning("Error while gathering data on node %s"
                          " (ignoring node): %s", node, msg)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus), errors.ECODE_ENVIRON)

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate params changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      self.new_beparams = objects.FillDict(
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)

    if self.op.nicparams:
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
      self.new_nicparams = objects.FillDict(
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
      nic_errors = []

      # check all instances for consistency
      for instance in self.cfg.GetAllInstancesInfo().values():
        for nic_idx, nic in enumerate(instance.nics):
          params_copy = copy.deepcopy(nic.nicparams)
          params_filled = objects.FillDict(self.new_nicparams, params_copy)

          # check parameter syntax
          try:
            objects.NIC.CheckParameterSyntax(params_filled)
          except errors.ConfigurationError, err:
            nic_errors.append("Instance %s, nic/%d: %s" %
                              (instance.name, nic_idx, err))

          # if we're moving instances to routed, check that they have an ip
          target_mode = params_filled[constants.NIC_MODE]
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
            nic_errors.append("Instance %s, nic/%d: routed nic with no ip" %
                              (instance.name, nic_idx))
      if nic_errors:
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
                                   "\n".join(nic_errors))

    # hypervisor list/parameters
    self.new_hvparams = objects.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input",
                                   errors.ECODE_INVAL)
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    # os hypervisor parameters
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
    if self.op.os_hvp:
      if not isinstance(self.op.os_hvp, dict):
        raise errors.OpPrereqError("Invalid 'os_hvp' parameter on input",
                                   errors.ECODE_INVAL)
      for os_name, hvs in self.op.os_hvp.items():
        if not isinstance(hvs, dict):
          raise errors.OpPrereqError(("Invalid 'os_hvp' parameter on"
                                      " input"), errors.ECODE_INVAL)
        if os_name not in self.new_os_hvp:
          self.new_os_hvp[os_name] = hvs
        else:
          for hv_name, hv_dict in hvs.items():
            if hv_name not in self.new_os_hvp[os_name]:
              self.new_os_hvp[os_name][hv_name] = hv_dict
            else:
              self.new_os_hvp[os_name][hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
      if not self.hv_list:
        raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                                   " least one member",
                                   errors.ECODE_INVAL)
      invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
      if invalid_hvs:
        raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                                   " entries: %s" %
                                   utils.CommaJoin(invalid_hvs),
                                   errors.ECODE_INVAL)
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      new_volume = self.op.vg_name
      if not new_volume:
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.os_hvp:
      self.cluster.os_hvp = self.new_os_hvp
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
    if self.op.nicparams:
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams

    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
      # we need to update the pool size here, otherwise the save will fail
      _AdjustCandidatePool(self, [])

    self.cfg.Update(self.cluster, feedback_fn)


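# Sketch of the os_hvp structure merged in LUSetClusterParams.CheckPrereq
# above (names and values made up for illustration): a two-level dict
# mapping an OS name to per-hypervisor parameter overrides, e.g.
#   {"debian-image": {"xen-pvm": {"kernel_path": "/boot/vmlinuz-custom"}}}
# New OSes or hypervisors are added wholesale, existing ones are updated
# key by key.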
def _RedistributeAncillaryFiles(lu, additional_nodes=None):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  @param lu: calling logical unit
  @param additional_nodes: list of nodes not in the config to distribute to

  """
  # 1. Gather target nodes
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
  dist_nodes = lu.cfg.GetOnlineNodeList()
  if additional_nodes is not None:
    dist_nodes.extend(additional_nodes)
  if myself.name in dist_nodes:
    dist_nodes.remove(myself.name)

  # 2. Gather files to distribute
  dist_files = set([constants.ETC_HOSTS,
                    constants.SSH_KNOWN_HOSTS_FILE,
                    constants.RAPI_CERT_FILE,
                    constants.RAPI_USERS_FILE,
                    constants.HMAC_CLUSTER_KEY,
                   ])

  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
  for hv_name in enabled_hypervisors:
    hv_class = hypervisor.GetHypervisor(hv_name)
    dist_files.update(hv_class.GetAncillaryFiles())

  # 3. Perform the files upload
  for fname in dist_files:
    if os.path.exists(fname):
      result = lu.rpc.call_upload_file(dist_nodes, fname)
      for to_node, to_result in result.items():
        msg = to_result.fail_msg
        if msg:
          msg = ("Copy of file %s to node %s failed: %s" %
                 (fname, to_node, msg))
          lu.proc.LogWarning(msg)


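# Hypothetical usage of _RedistributeAncillaryFiles above: callers that
# just changed the configuration pass no extra argument (as
# LURedistributeConfig below does), while code in the middle of adding a
# node could pass it explicitly, e.g.
#   _RedistributeAncillaryFiles(self, additional_nodes=[new_node.name])
# so that the not-yet-configured node also receives the files.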
class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
    _RedistributeAncillaryFiles(self)


def _WaitForSync(lu, instance, oneshot=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  # TODO: Convert to utils.Retry

  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.payload
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                           node, instance.disks[i].iv_name)
        continue

      cumul_degraded = (cumul_degraded or
                        (mstat.is_degraded and mstat.sync_percent is None))
      if mstat.sync_percent is not None:
        done = False
        if mstat.estimated_time is not None:
          rem_time = "%d estimated seconds remaining" % mstat.estimated_time
          max_time = mstat.estimated_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, mstat.sync_percent,
                         rem_time))

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


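# Hypothetical usage of _WaitForSync above from an instance-handling LU:
#   disks_ok = _WaitForSync(self, instance)
#   if not disks_ok:
#     self.LogWarning("Some disks are still degraded")
# The helper returns False when a mirror is still degraded (with no resync
# in progress) after the bounded retries in the loop above.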
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)

  result = True

  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      if ldisk:
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
      else:
        result = result and not rstats.payload.is_degraded

  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status", "variants")
  # Fields that need calculation of global os validity
  _FIELDS_NEEDVALID = frozenset(["valid", "variants"])

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported",
                                 errors.ECODE_INVAL)

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    self.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and tuples of (path, status, diagnose) as values, eg::

          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
                                     (/srv/..., False, "invalid api")],
                           "node2": [(/srv/..., True, "")]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].fail_msg]
    for node_name, nr in rlist.items():
      if nr.fail_msg or not nr.payload:
        continue
      for name, path, status, diagnose, variants in nr.payload:
        if name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[name] = {}
          for nname in good_nodes:
            all_os[name][nname] = []
        all_os[name][node_name].append((path, status, diagnose, variants))
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    pol = self._DiagnoseByOS(node_data)
    output = []
    calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields)
    calc_variants = "variants" in self.op.output_fields

    for os_name, os_data in pol.items():
      row = []
      if calc_valid:
        valid = True
        variants = None
        for osl in os_data.values():
          valid = valid and osl and osl[0][1]
          if not valid:
            variants = None
            break
          if calc_variants:
            node_variants = osl[0][3]
            if variants is None:
              variants = node_variants
            else:
              variants = [v for v in variants if v in node_variants]

      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          val = valid
        elif field == "node_status":
          # this is just a copy of the dict
          val = {}
          for node_name, nos_list in os_data.items():
            val[node_name] = nos_list
        elif field == "variants":
          val = variants
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output


class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    try:
      all_nodes.remove(self.op.node_name)
    except ValueError:
      logging.warning("Node %s which is about to be removed not found"
                      " in the all nodes list", self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    node = self.cfg.GetNodeInfo(self.op.node_name)
    assert node is not None

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.",
                                 errors.ECODE_INVAL)

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name,
                                   errors.ECODE_INVAL)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self, exceptions=[node.name])
    self.context.RemoveNode(node.name)

    # Run post hooks on the node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
    except:
      # pylint: disable-msg=W0702
      self.LogWarning("Errors occurred running hooks on %s" % node.name)

    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
    msg = result.fail_msg
    if msg:
      self.LogWarning("Errors encountered on the remote node while leaving"
                      " the cluster: %s", msg)


class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  # pylint: disable-msg=W0142
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False

  _SIMPLE_FIELDS = ["name", "serial_no", "ctime", "mtime", "uuid",
                    "master_candidate", "offline", "drained"]

  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  _FIELDS_STATIC = utils.FieldSet(*[
    "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "master",
    "role"] + _SIMPLE_FIELDS
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.fail_msg and nodeinfo.payload:
          nodeinfo = nodeinfo.payload
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
            }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      inst_data = self.cfg.GetAllInstancesInfo()

      for inst in inst_data.values():
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field in self._SIMPLE_FIELDS:
          val = getattr(node, field)
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "master":
          val = node.name == master_node
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        elif field == "role":
          if node.name == master_node:
            val = "M"
          elif node.master_candidate:
            val = "C"
          elif node.drained:
            val = "D"
          elif node.offline:
            val = "O"
          else:
            val = "R"
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output


class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      nresult = volumes[node]
      if nresult.offline:
        continue
      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
        continue

      node_vols = nresult.payload[:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output


class LUQueryNodeStorage(NoHooksLU):
  """Logical unit for getting information on storage units on node(s).

  """
  _OP_REQP = ["nodes", "storage_type", "output_fields"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)

  def ExpandNames(self):
    storage_type = self.op.storage_type

    if storage_type not in constants.VALID_STORAGE_TYPES:
      raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
                                 errors.ECODE_INVAL)

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.op.name = getattr(self.op, "name", None)

    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    # Always get name to sort by
    if constants.SF_NAME in self.op.output_fields:
      fields = self.op.output_fields[:]
    else:
      fields = [constants.SF_NAME] + self.op.output_fields

    # Never ask for node or type as it's only known to the LU
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
      while extra in fields:
        fields.remove(extra)

    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
    name_idx = field_idx[constants.SF_NAME]

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    data = self.rpc.call_storage_list(self.nodes,
                                      self.op.storage_type, st_args,
                                      self.op.name, fields)

    result = []

    for node in utils.NiceSort(self.nodes):
      nresult = data[node]
      if nresult.offline:
        continue

      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
        continue

      rows = dict([(row[name_idx], row) for row in nresult.payload])

      for name in utils.NiceSort(rows.keys()):
        row = rows[name]

        out = []

        for field in self.op.output_fields:
          if field == constants.SF_NODE:
            val = node
          elif field == constants.SF_TYPE:
            val = self.op.storage_type
          elif field in field_idx:
            val = row[field_idx[field]]
          else:
            raise errors.ParameterError(field)

          out.append(val)

        result.append(out)

    return result


class LUModifyNodeStorage(NoHooksLU):
  """Logical unit for modifying a storage volume on a node.

  """
  _OP_REQP = ["node_name", "storage_type", "name", "changes"]
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type
    if storage_type not in constants.VALID_STORAGE_TYPES:
      raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_name,
      }

  def CheckPrereq(self):
    """Check prerequisites.

    """
    storage_type = self.op.storage_type

    try:
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
    except KeyError:
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " modified" % storage_type,
                                 errors.ECODE_INVAL)

    diff = set(self.op.changes.keys()) - modifiable
    if diff:
      raise errors.OpPrereqError("The following fields can not be modified for"
                                 " storage units of type '%s': %r" %
                                 (storage_type, list(diff)),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Modifies a storage volume on a node.

    """
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_modify(self.op.node_name,
                                          self.op.storage_type, st_args,
                                          self.op.name, self.op.changes)
    result.Raise("Failed to modify storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))


class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def CheckArguments(self):
    # validate/normalize the node name
    self.op.node_name = utils.HostInfo.NormalizeName(self.op.node_name)

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signaled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.GetHostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given",
                                 errors.ECODE_INVAL)
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node, errors.ECODE_EXISTS)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
                                 errors.ECODE_NOENT)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before",
                                     errors.ECODE_INVAL)
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name,
                                   errors.ECODE_NOTUNIQUE)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one",
                                   errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one",
                                   errors.ECODE_INVAL)

    # checks reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping",
                                 errors.ECODE_ENVIRON)

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port",
                                   errors.ECODE_ENVIRON)

    if self.op.readd:
      exceptions = [node]
    else:
      exceptions = []

    self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)

    if self.op.readd:
      self.new_node = self.cfg.GetNodeInfo(node)
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
    else:
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # for re-adds, reset the offline/drained/master-candidate flags;
    # we need to reset here, otherwise offline would prevent RPC calls
    # later in the procedure; this also means that if the re-add
    # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
      self.LogInfo("Readding a node, the offline/drained flags were reset")
      # if we demote the node, we do cleanup later in the procedure
      new_node.master_candidate = self.master_candidate

    # notify the user about any possible mc promotion
    if new_node.master_candidate:
      self.LogInfo("Node will be a master candidate")

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise("Can't get version information from node %s" % node)
    if constants.PROTOCOL_VERSION == result.payload:
      logging.info("Communication to node %s fine, sw version %s match",
                   node, result.payload)
    else:
      raise errors.OpExecError("Version mismatch master version %s,"
                               " node version %s" %
                               (constants.PROTOCOL_VERSION, result.payload))

    # setup ssh on node
    if self.cfg.GetClusterInfo().modify_ssh_setup:
      logging.info("Copy ssh key to node %s", node)
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
      keyarray = []
      keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                  constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                  priv_key, pub_key]

      for i in keyfiles:
        keyarray.append(utils.ReadFile(i))

      result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                      keyarray[2], keyarray[3], keyarray[4],
                                      keyarray[5])
      result.Raise("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
                   prereq=True, ecode=errors.ECODE_ENVIRON)
      if not result.payload:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      constants.NV_NODELIST: [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
      if nl_payload:
        for failed in nl_payload:
          feedback_fn("ssh/hostname verification failed"
                      " (checking from %s): %s" %
                      (verifier, nl_payload[failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    if self.op.readd:
      _RedistributeAncillaryFiles(self)
      self.context.ReaddNode(new_node)
      # make sure we redistribute the config
      self.cfg.Update(new_node, feedback_fn)
      # and make sure the new node will not have old files around
      if not new_node.master_candidate:
        result = self.rpc.call_node_demote_from_mc(new_node.name)
        msg = result.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself from master"
                          " candidate status: %s" % msg)
    else:
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
      self.context.AddNode(new_node, self.proc.GetECId())


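# Illustrative sketch (not part of Ganeti): LUAddNode.CheckPrereq treats a
# node as "single homed" when its secondary IP equals its primary IP, and
# requires the new node to match the master in that respect.  The core of
# that check, reduced to plain strings with example addresses, would be:
def _ExampleHomingMatches(master_pip, master_sip, new_pip, new_sip):
  """Return True if master and new node are both single or both dual homed."""
  master_singlehomed = master_sip == master_pip
  newbie_singlehomed = new_sip == new_pip
  return master_singlehomed == newbie_singlehomed

# _ExampleHomingMatches("192.0.2.1", "192.0.2.1",
#                       "192.0.2.2", "198.51.100.2") == False

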
class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    _CheckBooleanOpField(self.op, 'drained')
    _CheckBooleanOpField(self.op, 'auto_promote')
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
    if all_mods.count(None) == 3:
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)
    if all_mods.count(True) > 1:
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time",
                                 errors.ECODE_INVAL)

    # Boolean value that tells us whether we're offlining or draining the node
    self.offline_or_drain = (self.op.offline == True or
                             self.op.drained == True)
    self.deoffline_or_drain = (self.op.offline == False or
                               self.op.drained == False)
    self.might_demote = (self.op.master_candidate == False or
                         self.offline_or_drain)

    self.lock_all = self.op.auto_promote and self.might_demote


  def ExpandNames(self):
    if self.lock_all:
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    else:
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the requested flag changes against the node's current state.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if (self.op.master_candidate is not None or
        self.op.drained is not None or
        self.op.offline is not None):
      # we can't change the master's node flags
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master role can be changed"
                                   " only via masterfailover",
                                   errors.ECODE_INVAL)


    if node.master_candidate and self.might_demote and not self.lock_all:
      assert not self.op.auto_promote, "auto-promote set but lock_all not"
      # check if after removing the current node, we're missing master
      # candidates
      (mc_remaining, mc_should, _) = \
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
      if mc_remaining != mc_should:
        raise errors.OpPrereqError("Not enough master candidates, please"
                                   " pass auto_promote to allow promotion",
                                   errors.ECODE_INVAL)

    if (self.op.master_candidate == True and
        ((node.offline and not self.op.offline == False) or
         (node.drained and not self.op.drained == False))):
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
                                 " to master_candidate" % node.name,
                                 errors.ECODE_INVAL)

    # If we're being deofflined/drained, we'll MC ourself if needed
    if (self.deoffline_or_drain and not self.offline_or_drain and not
        self.op.master_candidate == True and not node.master_candidate):
      self.op.master_candidate = _DecideSelfPromotion(self)
      if self.op.master_candidate:
        self.LogInfo("Autopromoting node to master candidate")

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node

    result = []
    changed_mc = False

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True:
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to offline"))
        if node.drained:
          node.drained = False
          result.append(("drained", "clear drained status due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      changed_mc = True
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        msg = rrc.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    if self.op.drained is not None:
      node.drained = self.op.drained
      result.append(("drained", str(self.op.drained)))
      if self.op.drained == True:
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to drain"))
          rrc = self.rpc.call_node_demote_from_mc(node.name)
          msg = rrc.fail_msg
          if msg:
            self.LogWarning("Node failed to demote itself: %s" % msg)
        if node.offline:
          node.offline = False
          result.append(("offline", "clear offline status due to drain"))

    # we locked all nodes, we adjust the CP before updating this node
    if self.lock_all:
      _AdjustCandidatePool(self, [node.name])

    # this will trigger configuration file update, if needed
    self.cfg.Update(node, feedback_fn)

    # this will trigger job queue propagation or cleanup
    if changed_mc:
      self.context.ReaddNode(node)

    return result


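# Illustrative sketch (not part of Ganeti): CheckArguments above rejects the
# request when no flag is given or when more than one flag is set to True,
# since offline/drained/master_candidate are mutually exclusive "roles".
# The same validation on plain values (None means "leave unchanged"):
def _ExampleValidateNodeMods(offline, master_candidate, drained):
  all_mods = [offline, master_candidate, drained]
  if all_mods.count(None) == 3:
    return "need at least one modification"
  if all_mods.count(True) > 1:
    return "at most one flag may be set to True"
  return None

# _ExampleValidateNodeMods(True, None, True) ==
#   "at most one flag may be set to True"

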
class LUPowercycleNode(NoHooksLU):
  """Powercycles a node.

  """
  _OP_REQP = ["node_name", "force"]
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
      raise errors.OpPrereqError("The node is the master and the force"
                                 " parameter was not set",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    """Locking for PowercycleNode.

    This is a last-resort option and shouldn't block on other
    jobs. Therefore, we grab no locks.

    """
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This LU has no prereqs.

    """
    pass

  def Exec(self, feedback_fn):
    """Reboots a node.

    """
    result = self.rpc.call_node_powercycle(self.op.node_name,
                                           self.cfg.GetHypervisorType())
    result.Raise("Failed to schedule the reboot")
    return result.payload


class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    os_hvp = {}

    # Filter just for enabled hypervisors
    for os_name, hv_dict in cluster.os_hvp.items():
      os_hvp[os_name] = {}
      for hv_name, hv_params in hv_dict.items():
        if hv_name in cluster.enabled_hypervisors:
          os_hvp[os_name][hv_name] = hv_params

    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": max(constants.OS_API_VERSIONS),
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.enabled_hypervisors[0],
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
                        for hypervisor_name in cluster.enabled_hypervisors]),
      "os_hvp": os_hvp,
      "beparams": cluster.beparams,
      "nicparams": cluster.nicparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "master_netdev": cluster.master_netdev,
      "volume_group_name": cluster.volume_group_name,
      "file_storage_dir": cluster.file_storage_dir,
      "ctime": cluster.ctime,
      "mtime": cluster.mtime,
      "uuid": cluster.uuid,
      "tags": list(cluster.GetTags()),
      }

    return result


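# Illustrative sketch (not part of Ganeti): the Exec method above copies
# cluster.os_hvp but keeps only the hypervisors that are actually enabled.
# The same nested-dict filtering, with made-up data:
def _ExampleFilterOsHvp(os_hvp, enabled_hypervisors):
  filtered = {}
  for os_name, hv_dict in os_hvp.items():
    filtered[os_name] = dict((hv, params) for hv, params in hv_dict.items()
                             if hv in enabled_hypervisors)
  return filtered

# _ExampleFilterOsHvp({"debian": {"kvm": {}, "xen-pvm": {}}}, ["kvm"]) ==
#   {"debian": {"kvm": {}}}

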
class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
                                  "watcher_pause")

  def ExpandNames(self):
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      elif field == "watcher_pause":
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values


class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)
    if not hasattr(self.op, "ignore_size"):
      self.op.ignore_size = False

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = \
              _AssembleInstanceDisks(self, self.instance,
                                     ignore_size=self.op.ignore_size)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
                           ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    dev_path = None

    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
      else:
        dev_path = result.payload

    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info


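# Illustrative sketch (not part of Ganeti): _AssembleInstanceDisks above does
# two passes, first assembling every (node, disk) pair in secondary mode and
# only then re-assembling on the primary node, to narrow the window in which
# DRBD could be switched to primary before both sides are connected.  The
# control flow, stripped of RPC calls and disk objects, is roughly:
def _ExampleTwoPassAssemble(pairs, primary, assemble_fn):
  """pairs is a list of (node, disk); assemble_fn(node, disk, as_primary)."""
  ok = True
  for node, disk in pairs:                    # pass 1: everyone as secondary
    ok = assemble_fn(node, disk, False) and ok
  for node, disk in pairs:                    # pass 2: primary node only
    if node == primary:
      ok = assemble_fn(node, disk, True) and ok
  return ok

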
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                           ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


3594
class LUDeactivateInstanceDisks(NoHooksLU):
3595
  """Shutdown an instance's disks.
3596

3597
  """
3598
  _OP_REQP = ["instance_name"]
3599
  REQ_BGL = False
3600

    
3601
  def ExpandNames(self):
3602
    self._ExpandAndLockInstance()
3603
    self.needed_locks[locking.LEVEL_NODE] = []
3604
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3605

    
3606
  def DeclareLocks(self, level):
3607
    if level == locking.LEVEL_NODE:
3608
      self._LockInstancesNodes()
3609

    
3610
  def CheckPrereq(self):
3611
    """Check prerequisites.
3612

3613
    This checks that the instance is in the cluster.
3614

3615
    """
3616
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3617
    assert self.instance is not None, \
3618
      "Cannot retrieve locked instance %s" % self.op.instance_name
3619

    
3620
  def Exec(self, feedback_fn):
3621
    """Deactivate the disks
3622

3623
    """
3624
    instance = self.instance
3625
    _SafeShutdownInstanceDisks(self, instance)
3626

    
3627

    
3628
def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  pnode = instance.primary_node
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  ins_l.Raise("Can't contact node %s" % pnode)

  if instance.name in ins_l.payload:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)


def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If the ignore_primary is false, errors on the primary node are
  not ignored.

  """
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result


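# Illustrative sketch (not part of Ganeti): _ShutdownInstanceDisks above only
# lets a failure slide when ignore_primary is set and the failing node is the
# primary; any other failure flips the aggregate result to False.  The same
# predicate in isolation:
def _ExampleFailureCounts(ignore_primary, node, primary_node):
  """Return True if a shutdown failure on this node should mark the result bad."""
  return not ignore_primary or node != primary_node

# _ExampleFailureCounts(True, "node1", "node1") == False
# _ExampleFailureCounts(False, "node1", "node1") == True

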
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
  nodeinfo[node].Raise("Can't get data from node %s" % node,
                       prereq=True, ecode=errors.ECODE_ENVIRON)
  free_mem = nodeinfo[node].payload.get('memory_free', None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem),
                               errors.ECODE_ENVIRON)
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem),
                               errors.ECODE_NORES)


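# Illustrative sketch (not part of Ganeti): _CheckNodeFreeMemory above fails
# both when the reported value is not an integer (node data unusable) and
# when the requested amount exceeds it.  With plain values:
def _ExampleHasEnoughMemory(free_mem, requested):
  if not isinstance(free_mem, int):
    return False            # can't trust the node's answer
  return requested <= free_mem

# _ExampleHasEnoughMemory(1024, 512) == True
# _ExampleHasEnoughMemory(None, 512) == False

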
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # extra beparams
    self.beparams = getattr(self.op, "beparams", {})
    if self.beparams:
      if not isinstance(self.beparams, dict):
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
                                   " dict" % (type(self.beparams), ),
                                   errors.ECODE_INVAL)
      # fill the beparams dict
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
      self.op.beparams = self.beparams

    # extra hvparams
    self.hvparams = getattr(self.op, "hvparams", {})
    if self.hvparams:
      if not isinstance(self.hvparams, dict):
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
                                   " dict" % (type(self.hvparams), ),
                                   errors.ECODE_INVAL)

      # check hypervisor parameter syntax (locally)
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
                                    instance.hvparams)
      filled_hvp.update(self.hvparams)
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
      self.op.hvparams = self.hvparams

    _CheckNodeOnline(self, instance.primary_node)

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node,
                      prereq=True, ecode=errors.ECODE_ENVIRON)
    if not remote_info.payload: # not running already
      _CheckNodeFreeMemory(self, instance.primary_node,
                           "starting instance %s" % instance.name,
                           bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force

    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    result = self.rpc.call_instance_start(node_current, instance,
                                          self.hvparams, self.beparams)
    msg = result.fail_msg
    if msg:
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance: %s" % msg)


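# Illustrative sketch (not part of Ganeti): CheckPrereq above layers the
# one-off hvparams passed with the start request on top of the cluster
# defaults and the instance's own settings before validating them.  The
# layering order, shown with plain dicts and made-up parameter names:
def _ExampleFillHvParams(cluster_defaults, instance_params, override):
  filled = dict(cluster_defaults)
  filled.update(instance_params)
  filled.update(override)
  return filled

# _ExampleFillHvParams({"acpi": True}, {"boot_order": "cd"},
#                      {"acpi": False}) == {"acpi": False, "boot_order": "cd"}

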
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)

  def ExpandNames(self):
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type,
                                             self.shutdown_timeout)
      result.Raise("Could not reboot instance")
    else:
      result = self.rpc.call_instance_shutdown(node_current, instance,
                                               self.shutdown_timeout)
      result.Raise("Could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)


class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.timeout = getattr(self.op, "timeout",
                           constants.DEFAULT_SHUTDOWN_TIMEOUT)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["TIMEOUT"] = self.timeout
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    timeout = self.timeout
    self.cfg.MarkInstanceDown(instance.name)
    result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
    msg = result.fail_msg
    if msg:
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)

    _ShutdownInstanceDisks(self, instance)


class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name,
                                 errors.ECODE_INVAL)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name,
                                 errors.ECODE_STATE)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node,
                      prereq=True, ecode=errors.ECODE_ENVIRON)
    if remote_info.payload:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node),
                                 errors.ECODE_STATE)

    self.op.os_type = getattr(self.op, "os_type", None)
    self.op.force_variant = getattr(self.op, "force_variant", False)
    if self.op.os_type is not None:
      # OS verification
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
      result = self.rpc.call_os_get(pnode, self.op.os_type)
      result.Raise("OS '%s' not in supported OS list for primary node %s" %
                   (self.op.os_type, pnode),
                   prereq=True, ecode=errors.ECODE_INVAL)
      if not self.op.force_variant:
        _CheckOSVariant(result.payload, self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst, feedback_fn)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      # FIXME: pass debug option from opcode to backend
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
                                             self.op.debug_level)
      result.Raise("Could not install OS for instance %s on node %s" %
                   (inst.name, inst.primary_node))
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURecreateInstanceDisks(LogicalUnit):
  """Recreate an instance's missing disks.

  """
  HPATH = "instance-recreate-disks"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disks"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    if not isinstance(self.op.disks, list):
      raise errors.OpPrereqError("Invalid disks parameter", errors.ECODE_INVAL)
    for item in self.op.disks:
      if (not isinstance(item, int) or
          item < 0):
        raise errors.OpPrereqError("Invalid disk specification '%s'" %
                                   str(item), errors.ECODE_INVAL)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name, errors.ECODE_INVAL)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name, errors.ECODE_STATE)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node,
                      prereq=True, ecode=errors.ECODE_ENVIRON)
    if remote_info.payload:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node), errors.ECODE_STATE)

    if not self.op.disks:
      self.op.disks = range(len(instance.disks))
    else:
      for idx in self.op.disks:
        if idx >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
                                     errors.ECODE_INVAL)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Recreate the disks.

    """
    to_skip = []
    for idx, _ in enumerate(self.instance.disks):
      if idx not in self.op.disks: # disk idx has not been passed in
        to_skip.append(idx)
        continue

    _CreateDisks(self, self.instance, to_skip=to_skip)


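# Illustrative sketch (not part of Ganeti): LURecreateInstanceDisks.Exec above
# recreates only the requested disk indices and skips the rest; the skip list
# is simply every index not named in the opcode:
def _ExampleDisksToSkip(total_disks, wanted_indices):
  return [idx for idx in range(total_disks) if idx not in wanted_indices]

# _ExampleDisksToSkip(3, [1]) == [0, 2]

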
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    _CheckNodeOnline(self, instance.primary_node)

    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name, errors.ECODE_STATE)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node,
                      prereq=True, ecode=errors.ECODE_ENVIRON)
    if remote_info.payload:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node), errors.ECODE_STATE)
    self.instance = instance

    # new name verification
    name_info = utils.GetHostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.shutdown_timeout
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return env, nl, nl_post

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    logging.info("Removing block devices for instance %s", instance.name)

    if not _RemoveDisks(self, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", instance.name)

    self.cfg.RemoveInstance(instance.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name


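# Illustrative usage sketch (comment only, not part of the module): a removal
# is normally requested through the matching opcode rather than by
# instantiating the LU directly; assuming the opcode class is
# opcodes.OpRemoveInstance, a client would submit something like:
#
#   op = opcodes.OpRemoveInstance(instance_name="inst1.example.com",
#                                 ignore_failures=False)
#   cli.SubmitOpCode(op)

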
class LUQueryInstances(NoHooksLU):
4312
  """Logical unit for querying instances.
4313

4314
  """
4315
  # pylint: disable-msg=W0142
4316
  _OP_REQP = ["output_fields", "names", "use_locking"]
4317
  REQ_BGL = False
4318
  _SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
4319
                    "serial_no", "ctime", "mtime", "uuid"]
4320
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
4321
                                    "admin_state",
4322
                                    "disk_template", "ip", "mac", "bridge",
4323
                                    "nic_mode", "nic_link",
4324
                                    "sda_size", "sdb_size", "vcpus", "tags",
4325
                                    "network_port", "beparams",
4326
                                    r"(disk)\.(size)/([0-9]+)",
4327
                                    r"(disk)\.(sizes)", "disk_usage",
4328
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
4329
                                    r"(nic)\.(bridge)/([0-9]+)",
4330
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
4331
                                    r"(disk|nic)\.(count)",
4332
                                    "hvparams",
4333
                                    ] + _SIMPLE_FIELDS +
4334
                                  ["hv/%s" % name
4335
                                   for name in constants.HVS_PARAMETERS
4336
                                   if name not in constants.HVC_GLOBALS] +
4337
                                  ["be/%s" % name
4338
                                   for name in constants.BES_PARAMETERS])
4339
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
4340

    
4341

    
4342
  def ExpandNames(self):
4343
    _CheckOutputFields(static=self._FIELDS_STATIC,
4344
                       dynamic=self._FIELDS_DYNAMIC,
4345
                       selected=self.op.output_fields)
4346

    
4347
    self.needed_locks = {}
4348
    self.share_locks[locking.LEVEL_INSTANCE] = 1
4349
    self.share_locks[locking.LEVEL_NODE] = 1
4350

    
4351
    if self.op.names:
4352
      self.wanted = _GetWantedInstances(self, self.op.names)
4353
    else:
4354
      self.wanted = locking.ALL_SET
4355

    
4356
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
4357
    self.do_locking = self.do_node_query and self.op.use_locking
4358
    if self.do_locking:
4359
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
4360
      self.needed_locks[locking.LEVEL_NODE] = []
4361
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4362

    
4363
  def DeclareLocks(self, level):
4364
    if level == locking.LEVEL_NODE and self.do_locking:
4365
      self._LockInstancesNodes()
4366

    
4367
  def CheckPrereq(self):
4368
    """Check prerequisites.
4369

4370
    """
4371
    pass
4372

    
4373
  def Exec(self, feedback_fn):
4374
    """Computes the list of nodes and their attributes.
4375

4376
    """
4377
    # pylint: disable-msg=R0912
4378
    # way too many branches here
4379
    all_info = self.cfg.GetAllInstancesInfo()
4380
    if self.wanted == locking.ALL_SET:
4381
      # caller didn't specify instance names, so ordering is not important
4382
      if self.do_locking:
4383
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
4384
      else:
4385
        instance_names = all_info.keys()
4386
      instance_names = utils.NiceSort(instance_names)
4387
    else:
4388
      # caller did specify names, so we must keep the ordering
4389
      if self.do_locking:
4390
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
4391
      else:
4392
        tgt_set = all_info.keys()
4393
      missing = set(self.wanted).difference(tgt_set)
4394
      if missing:
4395
        raise errors.OpExecError("Some instances were removed before"
4396
                                 " retrieving their data: %s" % missing)
4397
      instance_names = self.wanted
4398

    
4399
    instance_list = [all_info[iname] for iname in instance_names]
4400

    
4401
    # begin data gathering
4402

    
4403
    nodes = frozenset([inst.primary_node for inst in instance_list])
4404
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
4405

    
4406
    bad_nodes = []
4407
    off_nodes = []
4408
    if self.do_node_query:
4409
      live_data = {}
4410
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
4411
      for name in nodes:
4412
        result = node_data[name]
4413
        if result.offline:
4414
          # offline nodes will be in both lists
4415
          off_nodes.append(name)
4416
        if result.fail_msg:
4417
          bad_nodes.append(name)
4418
        else:
4419
          if result.payload:
4420
            live_data.update(result.payload)
4421
          # else no instance is alive
4422
    else:
4423
      live_data = dict([(name, {}) for name in instance_names])
4424

    
4425
    # end data gathering
4426

    
4427
    HVPREFIX = "hv/"
4428
    BEPREFIX = "be/"
4429
    output = []
4430
    cluster = self.cfg.GetClusterInfo()
4431
    for instance in instance_list:
4432
      iout = []
4433
      i_hv = cluster.FillHV(instance, skip_globals=True)
4434
      i_be = cluster.FillBE(instance)
4435
      i_nicp = [objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
4436
                                 nic.nicparams) for nic in instance.nics]
4437
      for field in self.op.output_fields:
4438
        st_match = self._FIELDS_STATIC.Matches(field)
4439
        if field in self._SIMPLE_FIELDS:
4440
          val = getattr(instance, field)
4441
        elif field == "pnode":
4442
          val = instance.primary_node
4443
        elif field == "snodes":
4444
          val = list(instance.secondary_nodes)
4445
        elif field == "admin_state":
4446
          val = instance.admin_up
4447
        elif field == "oper_state":
4448
          if instance.primary_node in bad_nodes:
4449
            val = None
4450
          else:
4451
            val = bool(live_data.get(instance.name))
4452
        elif field == "status":
4453
          if instance.primary_node in off_nodes:
4454
            val = "ERROR_nodeoffline"
4455
          elif instance.primary_node in bad_nodes:
4456
            val = "ERROR_nodedown"
4457
          else:
4458
            running = bool(live_data.get(instance.name))
4459
            if running:
4460
              if instance.admin_up:
4461
                val = "running"
4462
              else:
4463
                val = "ERROR_up"
4464
            else:
4465
              if instance.admin_up:
4466
                val = "ERROR_down"
4467
              else:
4468
                val = "ADMIN_down"
4469
        elif field == "oper_ram":
4470
          if instance.primary_node in bad_nodes:
4471
            val = None
4472
          elif instance.name in live_data:
4473
            val = live_data[instance.name].get("memory", "?")
4474
          else:
4475
            val = "-"
4476
        elif field == "vcpus":
4477
          val = i_be[constants.BE_VCPUS]
4478
        elif field == "disk_template":
4479
          val = instance.disk_template
4480
        elif field == "ip":
4481
          if instance.nics:
4482
            val = instance.nics[0].ip
4483
          else:
4484
            val = None
4485
        elif field == "nic_mode":
4486
          if instance.nics:
4487
            val = i_nicp[0][constants.NIC_MODE]
4488
          else:
4489
            val = None
4490
        elif field == "nic_link":
4491
          if instance.nics:
4492
            val = i_nicp[0][constants.NIC_LINK]
4493
          else:
4494
            val = None
4495
        elif field == "bridge":
4496
          if (instance.nics and
4497
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
4498
            val = i_nicp[0][constants.NIC_LINK]
4499
          else:
4500
            val = None
4501
        elif field == "mac":
4502
          if instance.nics:
4503
            val = instance.nics[0].mac
4504
          else:
4505
            val = None
4506
        elif field == "sda_size" or field == "sdb_size":
4507
          idx = ord(field[2]) - ord('a')
4508
          try:
4509
            val = instance.FindDisk(idx).size
4510
          except errors.OpPrereqError:
4511
            val = None
4512
        elif field == "disk_usage": # total disk usage per node
4513
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
4514
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
4515
        elif field == "tags":
4516
          val = list(instance.GetTags())
4517
        elif field == "hvparams":
4518
          val = i_hv
4519
        elif (field.startswith(HVPREFIX) and
4520
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS and
4521
              field[len(HVPREFIX):] not in constants.HVC_GLOBALS):
4522
          val = i_hv.get(field[len(HVPREFIX):], None)
4523
        elif field == "beparams":
4524
          val = i_be
4525
        elif (field.startswith(BEPREFIX) and
4526
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
4527
          val = i_be.get(field[len(BEPREFIX):], None)
4528
        elif st_match and st_match.groups():
4529
          # matches a variable list
4530
          st_groups = st_match.groups()
4531
          if st_groups and st_groups[0] == "disk":
4532
            if st_groups[1] == "count":
4533
              val = len(instance.disks)
4534
            elif st_groups[1] == "sizes":
4535
              val = [disk.size for disk in instance.disks]
4536
            elif st_groups[1] == "size":
4537
              try:
4538
                val = instance.FindDisk(st_groups[2]).size
4539
              except errors.OpPrereqError:
4540
                val = None
4541
            else:
4542
              assert False, "Unhandled disk parameter"
4543
          elif st_groups[0] == "nic":
4544
            if st_groups[1] == "count":
4545
              val = len(instance.nics)
4546
            elif st_groups[1] == "macs":
4547
              val = [nic.mac for nic in instance.nics]
4548
            elif st_groups[1] == "ips":
4549
              val = [nic.ip for nic in instance.nics]
4550
            elif st_groups[1] == "modes":
4551
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
4552
            elif st_groups[1] == "links":
4553
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
4554
            elif st_groups[1] == "bridges":
4555
              val = []
4556
              for nicp in i_nicp:
4557
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
4558
                  val.append(nicp[constants.NIC_LINK])
4559
                else:
4560
                  val.append(None)
4561
            else:
4562
              # index-based item
4563
              nic_idx = int(st_groups[2])
4564
              if nic_idx >= len(instance.nics):
4565
                val = None
4566
              else:
4567
                if st_groups[1] == "mac":
4568
                  val = instance.nics[nic_idx].mac
4569
                elif st_groups[1] == "ip":
4570
                  val = instance.nics[nic_idx].ip
4571
                elif st_groups[1] == "mode":
4572
                  val = i_nicp[nic_idx][constants.NIC_MODE]
4573
                elif st_groups[1] == "link":
4574
                  val = i_nicp[nic_idx][constants.NIC_LINK]
4575
                elif st_groups[1] == "bridge":
4576
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
4577
                  if nic_mode == constants.NIC_MODE_BRIDGED:
4578
                    val = i_nicp[nic_idx][constants.NIC_LINK]
4579
                  else:
4580
                    val = None
4581
                else:
4582
                  assert False, "Unhandled NIC parameter"
4583
          else:
4584
            assert False, ("Declared but unhandled variable parameter '%s'" %
4585
                           field)
4586
        else:
4587
          assert False, "Declared but unhandled parameter '%s'" % field
4588
        iout.append(val)
4589
      output.append(iout)
4590

    
4591
    return output
4592

    
4593

    
4594
class LUFailoverInstance(LogicalUnit):
4595
  """Failover an instance.
4596

4597
  """
4598
  HPATH = "instance-failover"
4599
  HTYPE = constants.HTYPE_INSTANCE
4600
  _OP_REQP = ["instance_name", "ignore_consistency"]
4601
  REQ_BGL = False
4602

    
4603
  def CheckArguments(self):
4604
    """Check the arguments.
4605

4606
    """
4607
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4608
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4609

    
4610
  def ExpandNames(self):
4611
    self._ExpandAndLockInstance()
4612
    self.needed_locks[locking.LEVEL_NODE] = []
4613
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4614

    
4615
  def DeclareLocks(self, level):
4616
    if level == locking.LEVEL_NODE:
4617
      self._LockInstancesNodes()
4618

    
4619
  def BuildHooksEnv(self):
4620
    """Build hooks env.
4621

4622
    This runs on master, primary and secondary nodes of the instance.
4623

4624
    """
4625
    instance = self.instance
4626
    source_node = instance.primary_node
4627
    target_node = instance.secondary_nodes[0]
4628
    env = {
4629
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
4630
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
4631
      "OLD_PRIMARY": source_node,
4632
      "OLD_SECONDARY": target_node,
4633
      "NEW_PRIMARY": target_node,
4634
      "NEW_SECONDARY": source_node,
4635
      }
4636
    env.update(_BuildInstanceHookEnvByObject(self, instance))
4637
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
4638
    nl_post = list(nl)
4639
    nl_post.append(source_node)
4640
    return env, nl, nl_post
4641

    
4642
  def CheckPrereq(self):
4643
    """Check prerequisites.
4644

4645
    This checks that the instance is in the cluster.
4646

4647
    """
4648
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4649
    assert self.instance is not None, \
4650
      "Cannot retrieve locked instance %s" % self.op.instance_name
4651

    
4652
    bep = self.cfg.GetClusterInfo().FillBE(instance)
4653
    if instance.disk_template not in constants.DTS_NET_MIRROR:
4654
      raise errors.OpPrereqError("Instance's disk layout is not"
4655
                                 " network mirrored, cannot failover.",
4656
                                 errors.ECODE_STATE)
4657

    
4658
    secondary_nodes = instance.secondary_nodes
4659
    if not secondary_nodes:
4660
      raise errors.ProgrammerError("no secondary node but using "
4661
                                   "a mirrored disk template")
4662

    
4663
    target_node = secondary_nodes[0]
4664
    _CheckNodeOnline(self, target_node)
4665
    _CheckNodeNotDrained(self, target_node)
4666
    if instance.admin_up:
4667
      # check memory requirements on the secondary node
4668
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
4669
                           instance.name, bep[constants.BE_MEMORY],
4670
                           instance.hypervisor)
4671
    else:
4672
      self.LogInfo("Not checking memory on the secondary node as"
4673
                   " instance will not be started")
4674

    
4675
    # check bridge existance
4676
    _CheckInstanceBridgesExist(self, instance, node=target_node)
4677

    
4678
  def Exec(self, feedback_fn):
4679
    """Failover an instance.
4680

4681
    The failover is done by shutting it down on its present node and
4682
    starting it on the secondary.
4683

4684
    """
4685
    instance = self.instance
4686

    
4687
    source_node = instance.primary_node
4688
    target_node = instance.secondary_nodes[0]
4689

    
4690
    if instance.admin_up:
4691
      feedback_fn("* checking disk consistency between source and target")
4692
      for dev in instance.disks:
4693
        # for drbd, these are drbd over lvm
4694
        if not _CheckDiskConsistency(self, dev, target_node, False):
4695
          if not self.op.ignore_consistency:
4696
            raise errors.OpExecError("Disk %s is degraded on target node,"
4697
                                     " aborting failover." % dev.iv_name)
4698
    else:
4699
      feedback_fn("* not checking disk consistency as instance is not running")
4700

    
4701
    feedback_fn("* shutting down instance on source node")
4702
    logging.info("Shutting down instance %s on node %s",
4703
                 instance.name, source_node)
4704

    
4705
    result = self.rpc.call_instance_shutdown(source_node, instance,
4706
                                             self.shutdown_timeout)
4707
    msg = result.fail_msg
4708
    if msg:
4709
      if self.op.ignore_consistency:
4710
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
4711
                             " Proceeding anyway. Please make sure node"
4712
                             " %s is down. Error details: %s",
4713
                             instance.name, source_node, source_node, msg)
4714
      else:
4715
        raise errors.OpExecError("Could not shutdown instance %s on"
4716
                                 " node %s: %s" %
4717
                                 (instance.name, source_node, msg))
4718

    
4719
    feedback_fn("* deactivating the instance's disks on source node")
4720
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
4721
      raise errors.OpExecError("Can't shut down the instance's disks.")
4722

    
4723
    instance.primary_node = target_node
4724
    # distribute new instance config to the other nodes
4725
    self.cfg.Update(instance, feedback_fn)
4726

    
4727
    # Only start the instance if it's marked as up
4728
    if instance.admin_up:
4729
      feedback_fn("* activating the instance's disks on target node")
4730
      logging.info("Starting instance %s on node %s",
4731
                   instance.name, target_node)
4732

    
4733
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
4734
                                               ignore_secondaries=True)
4735
      if not disks_ok:
4736
        _ShutdownInstanceDisks(self, instance)
4737
        raise errors.OpExecError("Can't activate the instance's disks")
4738

    
4739
      feedback_fn("* starting the instance on the target node")
4740
      result = self.rpc.call_instance_start(target_node, instance, None, None)
4741
      msg = result.fail_msg
4742
      if msg:
4743
        _ShutdownInstanceDisks(self, instance)
4744
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
4745
                                 (instance.name, target_node, msg))
4746

    
4747

    
4748
class LUMigrateInstance(LogicalUnit):
4749
  """Migrate an instance.
4750

4751
  This is migration without shutting down, compared to the failover,
4752
  which is done with shutdown.
4753

4754
  """
4755
  HPATH = "instance-migrate"
4756
  HTYPE = constants.HTYPE_INSTANCE
4757
  _OP_REQP = ["instance_name", "live", "cleanup"]
4758

    
4759
  REQ_BGL = False
4760

    
4761
  def ExpandNames(self):
4762
    self._ExpandAndLockInstance()
4763

    
4764
    self.needed_locks[locking.LEVEL_NODE] = []
4765
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4766

    
4767
    self._migrater = TLMigrateInstance(self, self.op.instance_name,
4768
                                       self.op.live, self.op.cleanup)
4769
    self.tasklets = [self._migrater]
4770

    
4771
  def DeclareLocks(self, level):
4772
    if level == locking.LEVEL_NODE:
4773
      self._LockInstancesNodes()
4774

    
4775
  def BuildHooksEnv(self):
4776
    """Build hooks env.
4777

4778
    This runs on master, primary and secondary nodes of the instance.
4779

4780
    """
4781
    instance = self._migrater.instance
4782
    source_node = instance.primary_node
4783
    target_node = instance.secondary_nodes[0]
4784
    env = _BuildInstanceHookEnvByObject(self, instance)
4785
    env["MIGRATE_LIVE"] = self.op.live
4786
    env["MIGRATE_CLEANUP"] = self.op.cleanup
4787
    env.update({
4788
        "OLD_PRIMARY": source_node,
4789
        "OLD_SECONDARY": target_node,
4790
        "NEW_PRIMARY": target_node,
4791
        "NEW_SECONDARY": source_node,
4792
        })
4793
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
4794
    nl_post = list(nl)
4795
    nl_post.append(source_node)
4796
    return env, nl, nl_post
4797

    
4798

    
4799
class LUMoveInstance(LogicalUnit):
4800
  """Move an instance by data-copying.
4801

4802
  """
4803
  HPATH = "instance-move"
4804
  HTYPE = constants.HTYPE_INSTANCE
4805
  _OP_REQP = ["instance_name", "target_node"]
4806
  REQ_BGL = False
4807

    
4808
  def CheckArguments(self):
4809
    """Check the arguments.
4810

4811
    """
4812
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4813
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4814

    
4815
  def ExpandNames(self):
4816
    self._ExpandAndLockInstance()
4817
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
4818
    self.op.target_node = target_node
4819
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
4820
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
4821

    
4822
  def DeclareLocks(self, level):
4823
    if level == locking.LEVEL_NODE:
4824
      self._LockInstancesNodes(primary_only=True)
4825

    
4826
  def BuildHooksEnv(self):
4827
    """Build hooks env.
4828

4829
    This runs on master, primary and secondary nodes of the instance.
4830

4831
    """
4832
    env = {
4833
      "TARGET_NODE": self.op.target_node,
4834
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
4835
      }
4836
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4837
    nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
4838
                                       self.op.target_node]
4839
    return env, nl, nl
4840

    
4841
  def CheckPrereq(self):
4842
    """Check prerequisites.
4843

4844
    This checks that the instance is in the cluster.
4845

4846
    """
4847
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4848
    assert self.instance is not None, \
4849
      "Cannot retrieve locked instance %s" % self.op.instance_name
4850

    
4851
    node = self.cfg.GetNodeInfo(self.op.target_node)
4852
    assert node is not None, \
4853
      "Cannot retrieve locked node %s" % self.op.target_node
4854

    
4855
    self.target_node = target_node = node.name
4856

    
4857
    if target_node == instance.primary_node:
4858
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
4859
                                 (instance.name, target_node),
4860
                                 errors.ECODE_STATE)
4861

    
4862
    bep = self.cfg.GetClusterInfo().FillBE(instance)
4863

    
4864
    for idx, dsk in enumerate(instance.disks):
4865
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
4866
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
4867
                                   " cannot copy" % idx, errors.ECODE_STATE)
4868

    
4869
    _CheckNodeOnline(self, target_node)
4870
    _CheckNodeNotDrained(self, target_node)
4871

    
4872
    if instance.admin_up:
4873
      # check memory requirements on the secondary node
4874
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
4875
                           instance.name, bep[constants.BE_MEMORY],
4876
                           instance.hypervisor)
4877
    else:
4878
      self.LogInfo("Not checking memory on the secondary node as"
4879
                   " instance will not be started")
4880

    
4881
    # check bridge existance
4882
    _CheckInstanceBridgesExist(self, instance, node=target_node)
4883

    
4884
  def Exec(self, feedback_fn):
4885
    """Move an instance.
4886

4887
    The move is done by shutting it down on its present node, copying
4888
    the data over (slow) and starting it on the new node.
4889

4890
    """
4891
    instance = self.instance
4892

    
4893
    source_node = instance.primary_node
4894
    target_node = self.target_node
4895

    
4896
    self.LogInfo("Shutting down instance %s on source node %s",
4897
                 instance.name, source_node)
4898

    
4899
    result = self.rpc.call_instance_shutdown(source_node, instance,
4900
                                             self.shutdown_timeout)
4901
    msg = result.fail_msg
4902
    if msg:
4903
      if self.op.ignore_consistency:
4904
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
4905
                             " Proceeding anyway. Please make sure node"
4906
                             " %s is down. Error details: %s",
4907
                             instance.name, source_node, source_node, msg)
4908
      else:
4909
        raise errors.OpExecError("Could not shutdown instance %s on"
4910
                                 " node %s: %s" %
4911
                                 (instance.name, source_node, msg))
4912

    
4913
    # create the target disks
4914
    try:
4915
      _CreateDisks(self, instance, target_node=target_node)
4916
    except errors.OpExecError:
4917
      self.LogWarning("Device creation failed, reverting...")
4918
      try:
4919
        _RemoveDisks(self, instance, target_node=target_node)
4920
      finally:
4921
        self.cfg.ReleaseDRBDMinors(instance.name)
4922
        raise
4923

    
4924
    cluster_name = self.cfg.GetClusterInfo().cluster_name
4925

    
4926
    errs = []
4927
    # activate, get path, copy the data over
4928
    for idx, disk in enumerate(instance.disks):
4929
      self.LogInfo("Copying data for disk %d", idx)
4930
      result = self.rpc.call_blockdev_assemble(target_node, disk,
4931
                                               instance.name, True)
4932
      if result.fail_msg:
4933
        self.LogWarning("Can't assemble newly created disk %d: %s",
4934
                        idx, result.fail_msg)
4935
        errs.append(result.fail_msg)
4936
        break
4937
      dev_path = result.payload
4938
      result = self.rpc.call_blockdev_export(source_node, disk,
4939
                                             target_node, dev_path,
4940
                                             cluster_name)
4941
      if result.fail_msg:
4942
        self.LogWarning("Can't copy data over for disk %d: %s",
4943
                        idx, result.fail_msg)
4944
        errs.append(result.fail_msg)
4945
        break
4946

    
4947
    if errs:
4948
      self.LogWarning("Some disks failed to copy, aborting")
4949
      try:
4950
        _RemoveDisks(self, instance, target_node=target_node)
4951
      finally:
4952
        self.cfg.ReleaseDRBDMinors(instance.name)
4953
        raise errors.OpExecError("Errors during disk copy: %s" %
4954
                                 (",".join(errs),))
4955

    
4956
    instance.primary_node = target_node
4957
    self.cfg.Update(instance, feedback_fn)
4958

    
4959
    self.LogInfo("Removing the disks on the original node")
4960
    _RemoveDisks(self, instance, target_node=source_node)
4961

    
4962
    # Only start the instance if it's marked as up
4963
    if instance.admin_up:
4964
      self.LogInfo("Starting instance %s on node %s",
4965
                   instance.name, target_node)
4966

    
4967
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
4968
                                           ignore_secondaries=True)
4969
      if not disks_ok:
4970
        _ShutdownInstanceDisks(self, instance)
4971
        raise errors.OpExecError("Can't activate the instance's disks")
4972

    
4973
      result = self.rpc.call_instance_start(target_node, instance, None, None)
4974
      msg = result.fail_msg
4975
      if msg:
4976
        _ShutdownInstanceDisks(self, instance)
4977
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
4978
                                 (instance.name, target_node, msg))
4979

    
4980

    
4981
class LUMigrateNode(LogicalUnit):
4982
  """Migrate all instances from a node.
4983

4984
  """
4985
  HPATH = "node-migrate"
4986
  HTYPE = constants.HTYPE_NODE
4987
  _OP_REQP = ["node_name", "live"]
4988
  REQ_BGL = False
4989

    
4990
  def ExpandNames(self):
4991
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4992

    
4993
    self.needed_locks = {
4994
      locking.LEVEL_NODE: [self.op.node_name],
4995
      }
4996

    
4997
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
4998

    
4999
    # Create tasklets for migrating instances for all instances on this node
5000
    names = []
5001
    tasklets = []
5002

    
5003
    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
5004
      logging.debug("Migrating instance %s", inst.name)
5005
      names.append(inst.name)
5006

    
5007
      tasklets.append(TLMigrateInstance(self, inst.name, self.op.live, False))
5008

    
5009
    self.tasklets = tasklets
5010

    
5011
    # Declare instance locks
5012
    self.needed_locks[locking.LEVEL_INSTANCE] = names
5013

    
5014
  def DeclareLocks(self, level):
5015
    if level == locking.LEVEL_NODE:
5016
      self._LockInstancesNodes()
5017

    
5018
  def BuildHooksEnv(self):
5019
    """Build hooks env.
5020

5021
    This runs on the master, the primary and all the secondaries.
5022

5023
    """
5024
    env = {
5025
      "NODE_NAME": self.op.node_name,
5026
      }
5027

    
5028
    nl = [self.cfg.GetMasterNode()]
5029

    
5030
    return (env, nl, nl)
5031

    
5032

    
5033
class TLMigrateInstance(Tasklet):
5034
  def __init__(self, lu, instance_name, live, cleanup):
5035
    """Initializes this class.
5036

5037
    """
5038
    Tasklet.__init__(self, lu)
5039

    
5040
    # Parameters
5041
    self.instance_name = instance_name
5042
    self.live = live
5043
    self.cleanup = cleanup
5044

    
5045
  def CheckPrereq(self):
5046
    """Check prerequisites.
5047

5048
    This checks that the instance is in the cluster.
5049

5050
    """
5051
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
5052
    instance = self.cfg.GetInstanceInfo(instance_name)
5053
    assert instance is not None
5054

    
5055
    if instance.disk_template != constants.DT_DRBD8:
5056
      raise errors.OpPrereqError("Instance's disk layout is not"
5057
                                 " drbd8, cannot migrate.", errors.ECODE_STATE)
5058

    
5059
    secondary_nodes = instance.secondary_nodes
5060
    if not secondary_nodes:
5061
      raise errors.ConfigurationError("No secondary node but using"
5062
                                      " drbd8 disk template")
5063

    
5064
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
5065

    
5066
    target_node = secondary_nodes[0]
5067
    # check memory requirements on the secondary node
5068
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
5069
                         instance.name, i_be[constants.BE_MEMORY],
5070
                         instance.hypervisor)
5071

    
5072
    # check bridge existance
5073
    _CheckInstanceBridgesExist(self, instance, node=target_node)
5074

    
5075
    if not self.cleanup:
5076
      _CheckNodeNotDrained(self, target_node)
5077
      result = self.rpc.call_instance_migratable(instance.primary_node,
5078
                                                 instance)
5079
      result.Raise("Can't migrate, please use failover",
5080
                   prereq=True, ecode=errors.ECODE_STATE)
5081

    
5082
    self.instance = instance
5083

    
5084
  def _WaitUntilSync(self):
5085
    """Poll with custom rpc for disk sync.
5086

5087
    This uses our own step-based rpc call.
5088

5089
    """
5090
    self.feedback_fn("* wait until resync is done")
5091
    all_done = False
5092
    while not all_done:
5093
      all_done = True
5094
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
5095
                                            self.nodes_ip,
5096
                                            self.instance.disks)
5097
      min_percent = 100
5098
      for node, nres in result.items():
5099
        nres.Raise("Cannot resync disks on node %s" % node)
5100
        node_done, node_percent = nres.payload
5101
        all_done = all_done and node_done
5102
        if node_percent is not None:
5103
          min_percent = min(min_percent, node_percent)
5104
      if not all_done:
5105
        if min_percent < 100:
5106
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
5107
        time.sleep(2)
5108

    
5109
  def _EnsureSecondary(self, node):
5110
    """Demote a node to secondary.
5111

5112
    """
5113
    self.feedback_fn("* switching node %s to secondary mode" % node)
5114

    
5115
    for dev in self.instance.disks:
5116
      self.cfg.SetDiskID(dev, node)
5117

    
5118
    result = self.rpc.call_blockdev_close(node, self.instance.name,
5119
                                          self.instance.disks)
5120
    result.Raise("Cannot change disk to secondary on node %s" % node)
5121

    
5122
  def _GoStandalone(self):
5123
    """Disconnect from the network.
5124

5125
    """
5126
    self.feedback_fn("* changing into standalone mode")
5127
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
5128
                                               self.instance.disks)
5129
    for node, nres in result.items():
5130
      nres.Raise("Cannot disconnect disks node %s" % node)
5131

    
5132
  def _GoReconnect(self, multimaster):
5133
    """Reconnect to the network.
5134

5135
    """
5136
    if multimaster:
5137
      msg = "dual-master"
5138
    else:
5139
      msg = "single-master"
5140
    self.feedback_fn("* changing disks into %s mode" % msg)
5141
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
5142
                                           self.instance.disks,
5143
                                           self.instance.name, multimaster)
5144
    for node, nres in result.items():
5145
      nres.Raise("Cannot change disks config on node %s" % node)
5146

    
5147
  def _ExecCleanup(self):
5148
    """Try to cleanup after a failed migration.
5149

5150
    The cleanup is done by:
5151
      - check that the instance is running only on one node
5152
        (and update the config if needed)
5153
      - change disks on its secondary node to secondary
5154
      - wait until disks are fully synchronized
5155
      - disconnect from the network
5156
      - change disks into single-master mode
5157
      - wait again until disks are fully synchronized
5158

5159
    """
5160
    instance = self.instance
5161
    target_node = self.target_node
5162
    source_node = self.source_node
5163

    
5164
    # check running on only one node
5165
    self.feedback_fn("* checking where the instance actually runs"
5166
                     " (if this hangs, the hypervisor might be in"
5167
                     " a bad state)")
5168
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
5169
    for node, result in ins_l.items():
5170
      result.Raise("Can't contact node %s" % node)
5171

    
5172
    runningon_source = instance.name in ins_l[source_node].payload
5173
    runningon_target = instance.name in ins_l[target_node].payload
5174

    
5175
    if runningon_source and runningon_target:
5176
      raise errors.OpExecError("Instance seems to be running on two nodes,"
5177
                               " or the hypervisor is confused. You will have"
5178
                               " to ensure manually that it runs only on one"
5179
                               " and restart this operation.")
5180

    
5181
    if not (runningon_source or runningon_target):
5182
      raise errors.OpExecError("Instance does not seem to be running at all."
5183
                               " In this case, it's safer to repair by"
5184
                               " running 'gnt-instance stop' to ensure disk"
5185
                               " shutdown, and then restarting it.")
5186

    
5187
    if runningon_target:
5188
      # the migration has actually succeeded, we need to update the config
5189
      self.feedback_fn("* instance running on secondary node (%s),"
5190
                       " updating config" % target_node)
5191
      instance.primary_node = target_node
5192
      self.cfg.Update(instance, self.feedback_fn)
5193
      demoted_node = source_node
5194
    else:
5195
      self.feedback_fn("* instance confirmed to be running on its"
5196
                       " primary node (%s)" % source_node)
5197
      demoted_node = target_node
5198

    
5199
    self._EnsureSecondary(demoted_node)
5200
    try:
5201
      self._WaitUntilSync()
5202
    except errors.OpExecError:
5203
      # we ignore here errors, since if the device is standalone, it
5204
      # won't be able to sync
5205
      pass
5206
    self._GoStandalone()
5207
    self._GoReconnect(False)
5208
    self._WaitUntilSync()
5209

    
5210
    self.feedback_fn("* done")
5211

    
5212
  def _RevertDiskStatus(self):
5213
    """Try to revert the disk status after a failed migration.
5214

5215
    """
5216
    target_node = self.target_node
5217
    try:
5218
      self._EnsureSecondary(target_node)
5219
      self._GoStandalone()
5220
      self._GoReconnect(False)
5221
      self._WaitUntilSync()
5222
    except errors.OpExecError, err:
5223
      self.lu.LogWarning("Migration failed and I can't reconnect the"
5224
                         " drives: error '%s'\n"
5225
                         "Please look and recover the instance status" %
5226
                         str(err))
5227

    
5228
  def _AbortMigration(self):
5229
    """Call the hypervisor code to abort a started migration.
5230

5231
    """
5232
    instance = self.instance
5233
    target_node = self.target_node
5234
    migration_info = self.migration_info
5235

    
5236
    abort_result = self.rpc.call_finalize_migration(target_node,
5237
                                                    instance,
5238
                                                    migration_info,
5239
                                                    False)
5240
    abort_msg = abort_result.fail_msg
5241
    if abort_msg:
5242
      logging.error("Aborting migration failed on target node %s: %s",
5243
                    target_node, abort_msg)
5244
      # Don't raise an exception here, as we stil have to try to revert the
5245
      # disk status, even if this step failed.
5246

    
5247
  def _ExecMigration(self):
5248
    """Migrate an instance.
5249

5250
    The migrate is done by:
5251
      - change the disks into dual-master mode
5252
      - wait until disks are fully synchronized again
5253
      - migrate the instance
5254
      - change disks on the new secondary node (the old primary) to secondary
5255
      - wait until disks are fully synchronized
5256
      - change disks into single-master mode
5257

5258
    """
5259
    instance = self.instance
5260
    target_node = self.target_node
5261
    source_node = self.source_node
5262

    
5263
    self.feedback_fn("* checking disk consistency between source and target")
5264
    for dev in instance.disks:
5265
      if not _CheckDiskConsistency(self, dev, target_node, False):
5266
        raise errors.OpExecError("Disk %s is degraded or not fully"
5267
                                 " synchronized on target node,"
5268
                                 " aborting migrate." % dev.iv_name)
5269

    
5270
    # First get the migration information from the remote node
5271
    result = self.rpc.call_migration_info(source_node, instance)
5272
    msg = result.fail_msg
5273
    if msg:
5274
      log_err = ("Failed fetching source migration information from %s: %s" %
5275
                 (source_node, msg))
5276
      logging.error(log_err)
5277
      raise errors.OpExecError(log_err)
5278

    
5279
    self.migration_info = migration_info = result.payload
5280

    
5281
    # Then switch the disks to master/master mode
5282
    self._EnsureSecondary(target_node)
5283
    self._GoStandalone()
5284
    self._GoReconnect(True)
5285
    self._WaitUntilSync()
5286

    
5287
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
5288
    result = self.rpc.call_accept_instance(target_node,
5289
                                           instance,
5290
                                           migration_info,
5291
                                           self.nodes_ip[target_node])
5292

    
5293
    msg = result.fail_msg
5294
    if msg:
5295
      logging.error("Instance pre-migration failed, trying to revert"
5296
                    " disk status: %s", msg)
5297
      self.feedback_fn("Pre-migration failed, aborting")
5298
      self._AbortMigration()
5299
      self._RevertDiskStatus()
5300
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
5301
                               (instance.name, msg))
5302

    
5303
    self.feedback_fn("* migrating instance to %s" % target_node)
5304
    time.sleep(10)
5305
    result = self.rpc.call_instance_migrate(source_node, instance,
5306
                                            self.nodes_ip[target_node],
5307
                                            self.live)
5308
    msg = result.fail_msg
5309
    if msg:
5310
      logging.error("Instance migration failed, trying to revert"
5311
                    " disk status: %s", msg)
5312
      self.feedback_fn("Migration failed, aborting")
5313
      self._AbortMigration()
5314
      self._RevertDiskStatus()
5315
      raise errors.OpExecError("Could not migrate instance %s: %s" %
5316
                               (instance.name, msg))
5317
    time.sleep(10)
5318

    
5319
    instance.primary_node = target_node
5320
    # distribute new instance config to the other nodes
5321
    self.cfg.Update(instance, self.feedback_fn)
5322

    
5323
    result = self.rpc.call_finalize_migration(target_node,
5324
                                              instance,
5325
                                              migration_info,
5326
                                              True)
5327
    msg = result.fail_msg
5328
    if msg:
5329
      logging.error("Instance migration succeeded, but finalization failed:"
5330
                    " %s", msg)
5331
      raise errors.OpExecError("Could not finalize instance migration: %s" %
5332
                               msg)
5333

    
5334
    self._EnsureSecondary(source_node)
5335
    self._WaitUntilSync()
5336
    self._GoStandalone()
5337
    self._GoReconnect(False)
5338
    self._WaitUntilSync()
5339

    
5340
    self.feedback_fn("* done")
5341

    
5342
  def Exec(self, feedback_fn):
5343
    """Perform the migration.
5344

5345
    """
5346
    feedback_fn("Migrating instance %s" % self.instance.name)
5347

    
5348
    self.feedback_fn = feedback_fn
5349

    
5350
    self.source_node = self.instance.primary_node
5351
    self.target_node = self.instance.secondary_nodes[0]
5352
    self.all_nodes = [self.source_node, self.target_node]
5353
    self.nodes_ip = {
5354
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
5355
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
5356
      }
5357

    
5358
    if self.cleanup:
5359
      return self._ExecCleanup()
5360
    else:
5361
      return self._ExecMigration()
5362

    
5363

    
5364
def _CreateBlockDev(lu, node, instance, device, force_create,
                    info, force_open):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device which has
      the CreateOnSecondary() attribute
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  if device.CreateOnSecondary():
    force_create = True

  if device.children:
    for child in device.children:
      _CreateBlockDev(lu, node, instance, child, force_create,
                      info, force_open)

  if not force_create:
    return

  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)


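# Illustrative sketch (comment only): _CreateDisks() below drives this helper
# with force_create/force_open derived from whether the node is the primary,
# roughly:
#
#   for node in all_nodes:
#     f_create = node == pnode
#     _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
#
# devices whose CreateOnSecondary() is true (e.g. the components of a DRBD8
# mirror) force creation further down the tree even when f_create is False.

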
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created in advance.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  lu.cfg.SetDiskID(device, node)
  result = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, force_open, info)
  result.Raise("Can't create block device %s on"
               " node %s for instance %s" % (device, node, instance.name))
  if device.physical_id is None:
    device.physical_id = result.payload


def _GenerateUniqueNames(lu, exts):
  """Generate a suitable LV name.

  This will generate a logical volume name for the given instance.

  """
  results = []
  for val in exts:
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
    results.append("%s%s" % (new_id, val))
  return results


def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                          logical_id=(primary, secondary, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name)
  return drbd_dev


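# Illustrative sketch (comment only, values assumed): for a 10240 MB disk the
# branch built above looks roughly like
#
#   DRBD8  size=10240  logical_id=(primary, secondary, port, p_minor, s_minor, secret)
#    |- LV data  size=10240  logical_id=(vgname, "<uuid>.disk0_data")
#    |- LV meta  size=128    logical_id=(vgname, "<uuid>.disk0_meta")

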
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vgname, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         disk_index)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks


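# Illustrative sketch (comment only, names assumed): for a plain (LVM)
# template with two disks the call would look roughly like
#
#   disks = _GenerateDiskTemplate(lu, constants.DT_PLAIN,
#                                 "inst1.example.com", "node1.example.com",
#                                 [], [{"size": 10240, "mode": "rw"},
#                                      {"size": 2048, "mode": "rw"}],
#                                 None, None, 0)
#
# yielding two LD_LV Disk objects with iv_names "disk/0" and "disk/1" in the
# cluster's default volume group.

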
def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name


def _CreateDisks(lu, instance, to_skip=None, target_node=None):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @type to_skip: list
  @param to_skip: list of indices to skip
  @type target_node: string
  @param target_node: if passed, overrides the target node for creation
  @rtype: boolean
  @return: the success of the creation

  """
  info = _GetInstanceInfoText(instance)
  if target_node is None:
    pnode = instance.primary_node
    all_nodes = instance.all_nodes
  else:
    pnode = target_node
    all_nodes = [pnode]

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    result.Raise("Failed to create directory '%s' on"
                 " node %s" % (file_storage_dir, pnode))

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for idx, device in enumerate(instance.disks):
    if to_skip and idx in to_skip:
      continue
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    for node in all_nodes:
      f_create = node == pnode
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)


def _RemoveDisks(lu, instance, target_node=None):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type target_node: string
  @param target_node: used to override the node on which to remove the disks
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  for device in instance.disks:
    if target_node:
      edata = [(target_node, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node, disk in edata:
      lu.cfg.SetDiskID(disk, node)
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
      if msg:
        lu.LogWarning("Could not remove block device %s on node %s,"
                      " continuing anyway: %s", device.iv_name, node, msg)
        all_result = False

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if target_node:
      tgt = target_node
    else:
      tgt = instance.primary_node
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, instance.primary_node, result.fail_msg)
      all_result = False

  return all_result


def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group

  """
  # Required free disk space as a function of disk and swap space
  req_size_dict = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: sum(d["size"] for d in disks),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
    constants.DT_FILE: None,
  }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return req_size_dict[disk_template]

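
# Worked example of the computation above (illustration only; sizes are
# hypothetical and given in MB, matching the units used by the disk dicts):
#
#   _ComputeDiskSize(constants.DT_PLAIN, [{"size": 10240}, {"size": 2048}])
#   # => 12288  (plain LVs need exactly the sum of the disk sizes)
#   _ComputeDiskSize(constants.DT_DRBD8, [{"size": 10240}, {"size": 2048}])
#   # => 12544  ((10240 + 128) + (2048 + 128), metadata included)
#   _ComputeDiskSize(constants.DT_FILE, [{"size": 10240}])
#   # => None   (file-based and diskless instances need no VG space)
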
def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstracts the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
                                                  hvname,
                                                  hvparams)
  for node in nodenames:
    info = hvinfo[node]
    if info.offline:
      continue
    info.Raise("Hypervisor parameter validation failed on node %s" % node)

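
# Typical call site, mirroring how LUCreateInstance uses this helper further
# below (a sketch; 'self' is the logical unit and nodenames the primary node
# plus any secondaries):
#
#   nodenames = [pnode.name] + self.secondaries
#   _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
#
# Results from offline nodes are skipped, so validation can only fail on
# nodes that were actually reachable.
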
class LUCreateInstance(LogicalUnit):
5685
  """Create an instance.
5686

5687
  """
5688
  HPATH = "instance-add"
5689
  HTYPE = constants.HTYPE_INSTANCE
5690
  _OP_REQP = ["instance_name", "disks", "disk_template",
5691
              "mode", "start",
5692
              "wait_for_sync", "ip_check", "nics",
5693
              "hvparams", "beparams"]
5694
  REQ_BGL = False
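
  # A minimal opcode for this LU might look like the following sketch (all
  # values are hypothetical; the attribute names follow _OP_REQP above plus
  # the optional pnode/iallocator handled in ExpandNames):
  #
  #   op = opcodes.OpCreateInstance(instance_name="inst1.example.com",
  #                                 mode=constants.INSTANCE_CREATE,
  #                                 os_type="debootstrap",
  #                                 disk_template=constants.DT_PLAIN,
  #                                 disks=[{"size": 10240}], nics=[{}],
  #                                 pnode="node1.example.com",
  #                                 start=True, wait_for_sync=True,
  #                                 ip_check=True, hvparams={}, beparams={})
  #
  # Exactly one of pnode or iallocator must be given (checked in ExpandNames).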
5695

    
5696
  def CheckArguments(self):
5697
    """Check arguments.
5698

5699
    """
5700
    # do not require name_check to ease forward/backward compatibility
5701
    # for tools
5702
    if not hasattr(self.op, "name_check"):
5703
      self.op.name_check = True
5704
    # validate/normalize the instance name
5705
    self.op.instance_name = utils.HostInfo.NormalizeName(self.op.instance_name)
5706
    if self.op.ip_check and not self.op.name_check:
5707
      # TODO: make the ip check more flexible and not depend on the name check
5708
      raise errors.OpPrereqError("Cannot do ip checks without a name check",
5709
                                 errors.ECODE_INVAL)
5710
    if (self.op.disk_template == constants.DT_FILE and
5711
        not constants.ENABLE_FILE_STORAGE):
5712
      raise errors.OpPrereqError("File storage disabled at configure time",
5713
                                 errors.ECODE_INVAL)
5714

    
5715
  def ExpandNames(self):
5716
    """ExpandNames for CreateInstance.
5717

5718
    Figure out the right locks for instance creation.
5719

5720
    """
5721
    self.needed_locks = {}
5722

    
5723
    # set optional parameters to none if they don't exist
5724
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
5725
      if not hasattr(self.op, attr):
5726
        setattr(self.op, attr, None)
5727

    
5728
    # cheap checks, mostly valid constants given
5729

    
5730
    # verify creation mode
5731
    if self.op.mode not in (constants.INSTANCE_CREATE,
5732
                            constants.INSTANCE_IMPORT):
5733
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
5734
                                 self.op.mode, errors.ECODE_INVAL)
5735

    
5736
    # disk template and mirror node verification
5737
    if self.op.disk_template not in constants.DISK_TEMPLATES:
5738
      raise errors.OpPrereqError("Invalid disk template name",
5739
                                 errors.ECODE_INVAL)
5740

    
5741
    if self.op.hypervisor is None:
5742
      self.op.hypervisor = self.cfg.GetHypervisorType()
5743

    
5744
    cluster = self.cfg.GetClusterInfo()
5745
    enabled_hvs = cluster.enabled_hypervisors
5746
    if self.op.hypervisor not in enabled_hvs:
5747
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
5748
                                 " cluster (%s)" % (self.op.hypervisor,
5749
                                  ",".join(enabled_hvs)),
5750
                                 errors.ECODE_STATE)
5751

    
5752
    # check hypervisor parameter syntax (locally)
5753
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
5754
    filled_hvp = objects.FillDict(cluster.hvparams[self.op.hypervisor],
5755
                                  self.op.hvparams)
5756
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
5757
    hv_type.CheckParameterSyntax(filled_hvp)
5758
    self.hv_full = filled_hvp
5759
    # check that we don't specify global parameters on an instance
5760
    _CheckGlobalHvParams(self.op.hvparams)
5761

    
5762
    # fill and remember the beparams dict
5763
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
5764
    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
5765
                                    self.op.beparams)
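
    # FillDict keeps the cluster-level defaults and overlays the per-instance
    # overrides; a small worked example with hypothetical values:
    #   defaults = {"memory": 128, "vcpus": 1, "auto_balance": True}
    #   objects.FillDict(defaults, {"memory": 512})
    #   # => {"memory": 512, "vcpus": 1, "auto_balance": True}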
5766

    
5767
    #### instance parameters check
5768

    
5769
    # instance name verification
5770
    if self.op.name_check:
5771
      hostname1 = utils.GetHostInfo(self.op.instance_name)
5772
      self.op.instance_name = instance_name = hostname1.name
5773
      # used in CheckPrereq for ip ping check
5774
      self.check_ip = hostname1.ip
5775
    else:
5776
      instance_name = self.op.instance_name
5777
      self.check_ip = None
5778

    
5779
    # this is just a preventive check, but someone might still add this
5780
    # instance in the meantime, and creation will fail at lock-add time
5781
    if instance_name in self.cfg.GetInstanceList():
5782
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
5783
                                 instance_name, errors.ECODE_EXISTS)
5784

    
5785
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
5786

    
5787
    # NIC buildup
5788
    self.nics = []
5789
    for idx, nic in enumerate(self.op.nics):
5790
      nic_mode_req = nic.get("mode", None)
5791
      nic_mode = nic_mode_req
5792
      if nic_mode is None:
5793
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
5794

    
5795
      # in routed mode, for the first nic, the default ip is 'auto'
5796
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
5797
        default_ip_mode = constants.VALUE_AUTO
5798
      else:
5799
        default_ip_mode = constants.VALUE_NONE
5800

    
5801
      # ip validity checks
5802
      ip = nic.get("ip", default_ip_mode)
5803
      if ip is None or ip.lower() == constants.VALUE_NONE:
5804
        nic_ip = None
5805
      elif ip.lower() == constants.VALUE_AUTO:
5806
        if not self.op.name_check:
5807
          raise errors.OpPrereqError("IP address set to auto but name checks"
5808
                                     " have been skipped. Aborting.",
5809
                                     errors.ECODE_INVAL)
5810
        nic_ip = hostname1.ip
5811
      else:
5812
        if not utils.IsValidIP(ip):
5813
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
5814
                                     " like a valid IP" % ip,
5815
                                     errors.ECODE_INVAL)
5816
        nic_ip = ip
5817

    
5818
      # TODO: check the ip address for uniqueness
5819
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
5820
        raise errors.OpPrereqError("Routed nic mode requires an ip address",
5821
                                   errors.ECODE_INVAL)
5822

    
5823
      # MAC address verification
5824
      mac = nic.get("mac", constants.VALUE_AUTO)
5825
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5826
        mac = utils.NormalizeAndValidateMac(mac)
5827

    
5828
        try:
5829
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
5830
        except errors.ReservationError:
5831
          raise errors.OpPrereqError("MAC address %s already in use"
5832
                                     " in cluster" % mac,
5833
                                     errors.ECODE_NOTUNIQUE)
5834

    
5835
      # bridge verification
5836
      bridge = nic.get("bridge", None)
5837
      link = nic.get("link", None)
5838
      if bridge and link:
5839
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
5840
                                   " at the same time", errors.ECODE_INVAL)
5841
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
5842
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
5843
                                   errors.ECODE_INVAL)
5844
      elif bridge:
5845
        link = bridge
5846

    
5847
      nicparams = {}
5848
      if nic_mode_req:
5849
        nicparams[constants.NIC_MODE] = nic_mode_req
5850
      if link:
5851
        nicparams[constants.NIC_LINK] = link
5852

    
5853
      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
5854
                                      nicparams)
5855
      objects.NIC.CheckParameterSyntax(check_params)
5856
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
5857

    
5858
    # disk checks/pre-build
5859
    self.disks = []
5860
    for disk in self.op.disks:
5861
      mode = disk.get("mode", constants.DISK_RDWR)
5862
      if mode not in constants.DISK_ACCESS_SET:
5863
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
5864
                                   mode, errors.ECODE_INVAL)
5865
      size = disk.get("size", None)
5866
      if size is None:
5867
        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
5868
      try:
5869
        size = int(size)
5870
      except (TypeError, ValueError):
5871
        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
5872
                                   errors.ECODE_INVAL)
5873
      self.disks.append({"size": size, "mode": mode})
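
    # After this loop self.disks is a list of plain dicts, for example
    # (hypothetical size, in MB):
    #   [{"size": 10240, "mode": constants.DISK_RDWR}]
    # which is later fed to _ComputeDiskSize and _GenerateDiskTemplate.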
5874

    
5875
    # file storage checks
5876
    if (self.op.file_driver and
5877
        not self.op.file_driver in constants.FILE_DRIVER):
5878
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
5879
                                 self.op.file_driver, errors.ECODE_INVAL)
5880

    
5881
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
5882
      raise errors.OpPrereqError("File storage directory path not absolute",
5883
                                 errors.ECODE_INVAL)
5884

    
5885
    ### Node/iallocator related checks
5886
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
5887
      raise errors.OpPrereqError("One and only one of iallocator and primary"
5888
                                 " node must be given",
5889
                                 errors.ECODE_INVAL)
5890

    
5891
    if self.op.iallocator:
5892
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5893
    else:
5894
      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
5895
      nodelist = [self.op.pnode]
5896
      if self.op.snode is not None:
5897
        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
5898
        nodelist.append(self.op.snode)
5899
      self.needed_locks[locking.LEVEL_NODE] = nodelist
5900

    
5901
    # in case of import lock the source node too
5902
    if self.op.mode == constants.INSTANCE_IMPORT:
5903
      src_node = getattr(self.op, "src_node", None)
5904
      src_path = getattr(self.op, "src_path", None)
5905

    
5906
      if src_path is None:
5907
        self.op.src_path = src_path = self.op.instance_name
5908

    
5909
      if src_node is None:
5910
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5911
        self.op.src_node = None
5912
        if os.path.isabs(src_path):
5913
          raise errors.OpPrereqError("Importing an instance from an absolute"
5914
                                     " path requires a source node option.",
5915
                                     errors.ECODE_INVAL)
5916
      else:
5917
        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
5918
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
5919
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
5920
        if not os.path.isabs(src_path):
5921
          self.op.src_path = src_path = \
5922
            utils.PathJoin(constants.EXPORT_DIR, src_path)
5923

    
5924
      # On import force_variant must be True, because if we forced it at
5925
      # initial install, our only chance when importing it back is that it
5926
      # works again!
5927
      self.op.force_variant = True
5928

    
5929
    else: # INSTANCE_CREATE
5930
      if getattr(self.op, "os_type", None) is None:
5931
        raise errors.OpPrereqError("No guest OS specified",
5932
                                   errors.ECODE_INVAL)
5933
      self.op.force_variant = getattr(self.op, "force_variant", False)
5934

    
5935
  def _RunAllocator(self):
5936
    """Run the allocator based on input opcode.
5937

5938
    """
5939
    nics = [n.ToDict() for n in self.nics]
5940
    ial = IAllocator(self.cfg, self.rpc,
5941
                     mode=constants.IALLOCATOR_MODE_ALLOC,
5942
                     name=self.op.instance_name,
5943
                     disk_template=self.op.disk_template,
5944
                     tags=[],
5945
                     os=self.op.os_type,
5946
                     vcpus=self.be_full[constants.BE_VCPUS],
5947
                     mem_size=self.be_full[constants.BE_MEMORY],
5948
                     disks=self.disks,
5949
                     nics=nics,
5950
                     hypervisor=self.op.hypervisor,
5951
                     )
5952

    
5953
    ial.Run(self.op.iallocator)
5954

    
5955
    if not ial.success:
5956
      raise errors.OpPrereqError("Can't compute nodes using"
5957
                                 " iallocator '%s': %s" %
5958
                                 (self.op.iallocator, ial.info),
5959
                                 errors.ECODE_NORES)
5960
    if len(ial.result) != ial.required_nodes:
5961
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
5962
                                 " of nodes (%s), required %s" %
5963
                                 (self.op.iallocator, len(ial.result),
5964
                                  ial.required_nodes), errors.ECODE_FAULT)
5965
    self.op.pnode = ial.result[0]
5966
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
5967
                 self.op.instance_name, self.op.iallocator,
5968
                 utils.CommaJoin(ial.result))
5969
    if ial.required_nodes == 2:
5970
      self.op.snode = ial.result[1]
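
    # Sketch of a successful allocation for a DRBD8 instance (node names are
    # hypothetical):
    #   ial.success         # True
    #   ial.required_nodes  # 2
    #   ial.result          # ["node2.example.com", "node3.example.com"]
    # ial.result[0] becomes the primary and ial.result[1] the secondary;
    # single-node disk templates only consume ial.result[0].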
5971

    
5972
  def BuildHooksEnv(self):
5973
    """Build hooks env.
5974

5975
    This runs on master, primary and secondary nodes of the instance.
5976

5977
    """
5978
    env = {
5979
      "ADD_MODE": self.op.mode,
5980
      }
5981
    if self.op.mode == constants.INSTANCE_IMPORT:
5982
      env["SRC_NODE"] = self.op.src_node
5983
      env["SRC_PATH"] = self.op.src_path
5984
      env["SRC_IMAGES"] = self.src_images
5985

    
5986
    env.update(_BuildInstanceHookEnv(
5987
      name=self.op.instance_name,
5988
      primary_node=self.op.pnode,
5989
      secondary_nodes=self.secondaries,
5990
      status=self.op.start,
5991
      os_type=self.op.os_type,
5992
      memory=self.be_full[constants.BE_MEMORY],
5993
      vcpus=self.be_full[constants.BE_VCPUS],
5994
      nics=_NICListToTuple(self, self.nics),
5995
      disk_template=self.op.disk_template,
5996
      disks=[(d["size"], d["mode"]) for d in self.disks],
5997
      bep=self.be_full,
5998
      hvp=self.hv_full,
5999
      hypervisor_name=self.op.hypervisor,
6000
    ))
6001

    
6002
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
6003
          self.secondaries)
6004
    return env, nl, nl
6005

    
6006

    
6007
  def CheckPrereq(self):
6008
    """Check prerequisites.
6009

6010
    """
6011
    if (not self.cfg.GetVGName() and
6012
        self.op.disk_template not in constants.DTS_NOT_LVM):
6013
      raise errors.OpPrereqError("Cluster does not support lvm-based"
6014
                                 " instances", errors.ECODE_STATE)
6015

    
6016
    if self.op.mode == constants.INSTANCE_IMPORT:
6017
      src_node = self.op.src_node
6018
      src_path = self.op.src_path
6019

    
6020
      if src_node is None:
6021
        locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
6022
        exp_list = self.rpc.call_export_list(locked_nodes)
6023
        found = False
6024
        for node in exp_list:
6025
          if exp_list[node].fail_msg:
6026
            continue
6027
          if src_path in exp_list[node].payload:
6028
            found = True
6029
            self.op.src_node = src_node = node
6030
            self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
6031
                                                         src_path)
6032
            break
6033
        if not found:
6034
          raise errors.OpPrereqError("No export found for relative path %s" %
6035
                                      src_path, errors.ECODE_INVAL)
6036

    
6037
      _CheckNodeOnline(self, src_node)
6038
      result = self.rpc.call_export_info(src_node, src_path)
6039
      result.Raise("No export or invalid export found in dir %s" % src_path)
6040

    
6041
      export_info = objects.SerializableConfigParser.Loads(str(result.payload))
6042
      if not export_info.has_section(constants.INISECT_EXP):
6043
        raise errors.ProgrammerError("Corrupted export config",
6044
                                     errors.ECODE_ENVIRON)
6045

    
6046
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
6047
      if (int(ei_version) != constants.EXPORT_VERSION):
6048
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
6049
                                   (ei_version, constants.EXPORT_VERSION),
6050
                                   errors.ECODE_ENVIRON)
6051

    
6052
      # Check that the new instance doesn't have less disks than the export
6053
      instance_disks = len(self.disks)
6054
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
6055
      if instance_disks < export_disks:
6056
        raise errors.OpPrereqError("Not enough disks to import."
6057
                                   " (instance: %d, export: %d)" %
6058
                                   (instance_disks, export_disks),
6059
                                   errors.ECODE_INVAL)
6060

    
6061
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
6062
      disk_images = []
6063
      for idx in range(export_disks):
6064
        option = 'disk%d_dump' % idx
6065
        if export_info.has_option(constants.INISECT_INS, option):
6066
          # FIXME: are the old os-es, disk sizes, etc. useful?
6067
          export_name = export_info.get(constants.INISECT_INS, option)
6068
          image = utils.PathJoin(src_path, export_name)
6069
          disk_images.append(image)
6070
        else:
6071
          disk_images.append(False)
6072

    
6073
      self.src_images = disk_images
6074

    
6075
      old_name = export_info.get(constants.INISECT_INS, 'name')
6076
      # FIXME: int() here could throw a ValueError on broken exports
6077
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
6078
      if self.op.instance_name == old_name:
6079
        for idx, nic in enumerate(self.nics):
6080
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
6081
            nic_mac_ini = 'nic%d_mac' % idx
6082
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
6083

    
6084
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
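
    # For reference, the export file parsed above is a ConfigParser-style
    # file; a rough sketch of the fields read in this block (the literal
    # section names are an assumption, the code only sees them through
    # constants.INISECT_EXP / constants.INISECT_INS):
    #   [export]
    #   version = 0
    #   os = debootstrap
    #   [instance]
    #   name = inst1.example.com
    #   disk_count = 1
    #   disk0_dump = disk0.dump
    #   nic_count = 1
    #   nic0_mac = aa:00:00:11:22:33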
6085

    
6086
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
6087
    if self.op.ip_check:
6088
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
6089
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
6090
                                   (self.check_ip, self.op.instance_name),
6091
                                   errors.ECODE_NOTUNIQUE)
6092

    
6093
    #### mac address generation
6094
    # By generating here the mac address both the allocator and the hooks get
6095
    # the real final mac address rather than the 'auto' or 'generate' value.
6096
    # There is a race condition between the generation and the instance object
6097
    # creation, which means that we know the mac is valid now, but we're not
6098
    # sure it will be when we actually add the instance. If things go bad
6099
    # adding the instance will abort because of a duplicate mac, and the
6100
    # creation job will fail.
6101
    for nic in self.nics:
6102
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6103
        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
6104

    
6105
    #### allocator run
6106

    
6107
    if self.op.iallocator is not None:
6108
      self._RunAllocator()
6109

    
6110
    #### node related checks
6111

    
6112
    # check primary node
6113
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
6114
    assert self.pnode is not None, \
6115
      "Cannot retrieve locked node %s" % self.op.pnode
6116
    if pnode.offline:
6117
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
6118
                                 pnode.name, errors.ECODE_STATE)
6119
    if pnode.drained:
6120
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
6121
                                 pnode.name, errors.ECODE_STATE)
6122

    
6123
    self.secondaries = []
6124

    
6125
    # mirror node verification
6126
    if self.op.disk_template in constants.DTS_NET_MIRROR:
6127
      if self.op.snode is None:
6128
        raise errors.OpPrereqError("The networked disk templates need"
6129
                                   " a mirror node", errors.ECODE_INVAL)
6130
      if self.op.snode == pnode.name:
6131
        raise errors.OpPrereqError("The secondary node cannot be the"
6132
                                   " primary node.", errors.ECODE_INVAL)
6133
      _CheckNodeOnline(self, self.op.snode)
6134
      _CheckNodeNotDrained(self, self.op.snode)
6135
      self.secondaries.append(self.op.snode)
6136

    
6137
    nodenames = [pnode.name] + self.secondaries
6138

    
6139
    req_size = _ComputeDiskSize(self.op.disk_template,
6140
                                self.disks)
6141

    
6142
    # Check lv size requirements
6143
    if req_size is not None:
6144
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
6145
                                         self.op.hypervisor)
6146
      for node in nodenames:
6147
        info = nodeinfo[node]
6148
        info.Raise("Cannot get current information from node %s" % node)
6149
        info = info.payload
6150
        vg_free = info.get('vg_free', None)
6151
        if not isinstance(vg_free, int):
6152
          raise errors.OpPrereqError("Can't compute free disk space on"
6153
                                     " node %s" % node, errors.ECODE_ENVIRON)
6154
        if req_size > vg_free:
6155
          raise errors.OpPrereqError("Not enough disk space on target node %s."
6156
                                     " %d MB available, %d MB required" %
6157
                                     (node, vg_free, req_size),
6158
                                     errors.ECODE_NORES)
6159

    
6160
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
6161

    
6162
    # os verification
6163
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
6164
    result.Raise("OS '%s' not in supported os list for primary node %s" %
6165
                 (self.op.os_type, pnode.name),
6166
                 prereq=True, ecode=errors.ECODE_INVAL)
6167
    if not self.op.force_variant:
6168
      _CheckOSVariant(result.payload, self.op.os_type)
6169

    
6170
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
6171

    
6172
    # memory check on primary node
6173
    if self.op.start:
6174
      _CheckNodeFreeMemory(self, self.pnode.name,
6175
                           "creating instance %s" % self.op.instance_name,
6176
                           self.be_full[constants.BE_MEMORY],
6177
                           self.op.hypervisor)
6178

    
6179
    self.dry_run_result = list(nodenames)
6180

    
6181
  def Exec(self, feedback_fn):
6182
    """Create and add the instance to the cluster.
6183

6184
    """
6185
    instance = self.op.instance_name
6186
    pnode_name = self.pnode.name
6187

    
6188
    ht_kind = self.op.hypervisor
6189
    if ht_kind in constants.HTS_REQ_PORT:
6190
      network_port = self.cfg.AllocatePort()
6191
    else:
6192
      network_port = None
6193

    
6194
    ##if self.op.vnc_bind_address is None:
6195
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
6196

    
6197
    # this is needed because os.path.join does not accept None arguments
6198
    if self.op.file_storage_dir is None:
6199
      string_file_storage_dir = ""
6200
    else:
6201
      string_file_storage_dir = self.op.file_storage_dir
6202

    
6203
    # build the full file storage dir path
6204
    file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
6205
                                      string_file_storage_dir, instance)
6206

    
6207

    
6208
    disks = _GenerateDiskTemplate(self,
6209
                                  self.op.disk_template,
6210
                                  instance, pnode_name,
6211
                                  self.secondaries,
6212
                                  self.disks,
6213
                                  file_storage_dir,
6214
                                  self.op.file_driver,
6215
                                  0)
6216

    
6217
    iobj = objects.Instance(name=instance, os=self.op.os_type,
6218
                            primary_node=pnode_name,
6219
                            nics=self.nics, disks=disks,
6220
                            disk_template=self.op.disk_template,
6221
                            admin_up=False,
6222
                            network_port=network_port,
6223
                            beparams=self.op.beparams,
6224
                            hvparams=self.op.hvparams,
6225
                            hypervisor=self.op.hypervisor,
6226
                            )
6227

    
6228
    feedback_fn("* creating instance disks...")
6229
    try:
6230
      _CreateDisks(self, iobj)
6231
    except errors.OpExecError:
6232
      self.LogWarning("Device creation failed, reverting...")
6233
      try:
6234
        _RemoveDisks(self, iobj)
6235
      finally:
6236
        self.cfg.ReleaseDRBDMinors(instance)
6237
        raise
6238

    
6239
    feedback_fn("adding instance %s to cluster config" % instance)
6240

    
6241
    self.cfg.AddInstance(iobj, self.proc.GetECId())
6242

    
6243
    # Declare that we don't want to remove the instance lock anymore, as we've
6244
    # added the instance to the config
6245
    del self.remove_locks[locking.LEVEL_INSTANCE]
6246
    # Unlock all the nodes
6247
    if self.op.mode == constants.INSTANCE_IMPORT:
6248
      nodes_keep = [self.op.src_node]
6249
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
6250
                       if node != self.op.src_node]
6251
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
6252
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
6253
    else:
6254
      self.context.glm.release(locking.LEVEL_NODE)
6255
      del self.acquired_locks[locking.LEVEL_NODE]
6256

    
6257
    if self.op.wait_for_sync:
6258
      disk_abort = not _WaitForSync(self, iobj)
6259
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
6260
      # make sure the disks are not degraded (still sync-ing is ok)
6261
      time.sleep(15)
6262
      feedback_fn("* checking mirrors status")
6263
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
6264
    else:
6265
      disk_abort = False
6266

    
6267
    if disk_abort:
6268
      _RemoveDisks(self, iobj)
6269
      self.cfg.RemoveInstance(iobj.name)
6270
      # Make sure the instance lock gets removed
6271
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
6272
      raise errors.OpExecError("There are some degraded disks for"
6273
                               " this instance")
6274

    
6275
    feedback_fn("creating os for instance %s on node %s" %
6276
                (instance, pnode_name))
6277

    
6278
    if iobj.disk_template != constants.DT_DISKLESS:
6279
      if self.op.mode == constants.INSTANCE_CREATE:
6280
        feedback_fn("* running the instance OS create scripts...")
6281
        # FIXME: pass debug option from opcode to backend
6282
        result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
6283
                                               self.op.debug_level)
6284
        result.Raise("Could not add os for instance %s"
6285
                     " on node %s" % (instance, pnode_name))
6286

    
6287
      elif self.op.mode == constants.INSTANCE_IMPORT:
6288
        feedback_fn("* running the instance OS import scripts...")
6289
        src_node = self.op.src_node
6290
        src_images = self.src_images
6291
        cluster_name = self.cfg.GetClusterName()
6292
        # FIXME: pass debug option from opcode to backend
6293
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
6294
                                                         src_node, src_images,
6295
                                                         cluster_name,
6296
                                                         self.op.debug_level)
6297
        msg = import_result.fail_msg
6298
        if msg:
6299
          self.LogWarning("Error while importing the disk images for instance"
6300
                          " %s on node %s: %s" % (instance, pnode_name, msg))
6301
      else:
6302
        # also checked in the prereq part
6303
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
6304
                                     % self.op.mode)
6305

    
6306
    if self.op.start:
6307
      iobj.admin_up = True
6308
      self.cfg.Update(iobj, feedback_fn)
6309
      logging.info("Starting instance %s on node %s", instance, pnode_name)
6310
      feedback_fn("* starting instance...")
6311
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
6312
      result.Raise("Could not start instance")
6313

    
6314
    return list(iobj.all_nodes)
6315

    
6316

    
6317
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    instance = self.instance
    node = instance.primary_node

    node_insts = self.rpc.call_instance_list([node],
                                             [instance.hypervisor])[node]
    node_insts.Raise("Can't get node information from %s" % node)

    if instance.name not in node_insts.payload:
      raise errors.OpExecError("Instance %s is not running." % instance.name)

    logging.debug("Connecting to console of %s on %s", instance.name, node)

    hyper = hypervisor.GetHypervisor(instance.hypervisor)
    cluster = self.cfg.GetClusterInfo()
    # beparams and hvparams are passed separately, to avoid editing the
    # instance and then saving the defaults in the instance itself.
    hvparams = cluster.FillHV(instance)
    beparams = cluster.FillBE(instance)
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)

    # build ssh cmdline
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
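
    # The caller receives an argv list to be executed on the master node; for
    # a Xen instance the result would look roughly like (paths and names are
    # hypothetical, and the exact ssh options depend on the cluster setup):
    #   ["ssh", ..., "root@node1.example.com", "xm console inst1.example.com"]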
6368

    
6369

    
6370
class LUReplaceDisks(LogicalUnit):
6371
  """Replace the disks of an instance.
6372

6373
  """
6374
  HPATH = "mirrors-replace"
6375
  HTYPE = constants.HTYPE_INSTANCE
6376
  _OP_REQP = ["instance_name", "mode", "disks"]
6377
  REQ_BGL = False
6378

    
6379
  def CheckArguments(self):
6380
    if not hasattr(self.op, "remote_node"):
6381
      self.op.remote_node = None
6382
    if not hasattr(self.op, "iallocator"):
6383
      self.op.iallocator = None
6384
    if not hasattr(self.op, "early_release"):
6385
      self.op.early_release = False
6386

    
6387
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
6388
                                  self.op.iallocator)
6389

    
6390
  def ExpandNames(self):
6391
    self._ExpandAndLockInstance()
6392

    
6393
    if self.op.iallocator is not None:
6394
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6395

    
6396
    elif self.op.remote_node is not None:
6397
      remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
6398
      self.op.remote_node = remote_node
6399

    
6400
      # Warning: do not remove the locking of the new secondary here
6401
      # unless DRBD8.AddChildren is changed to work in parallel;
6402
      # currently it doesn't since parallel invocations of
6403
      # FindUnusedMinor will conflict
6404
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
6405
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6406

    
6407
    else:
6408
      self.needed_locks[locking.LEVEL_NODE] = []
6409
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6410

    
6411
    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
6412
                                   self.op.iallocator, self.op.remote_node,
6413
                                   self.op.disks, False, self.op.early_release)
6414

    
6415
    self.tasklets = [self.replacer]
6416

    
6417
  def DeclareLocks(self, level):
6418
    # If we're not already locking all nodes in the set we have to declare the
6419
    # instance's primary/secondary nodes.
6420
    if (level == locking.LEVEL_NODE and
6421
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
6422
      self._LockInstancesNodes()
6423

    
6424
  def BuildHooksEnv(self):
6425
    """Build hooks env.
6426

6427
    This runs on the master, the primary and all the secondaries.
6428

6429
    """
6430
    instance = self.replacer.instance
6431
    env = {
6432
      "MODE": self.op.mode,
6433
      "NEW_SECONDARY": self.op.remote_node,
6434
      "OLD_SECONDARY": instance.secondary_nodes[0],
6435
      }
6436
    env.update(_BuildInstanceHookEnvByObject(self, instance))
6437
    nl = [
6438
      self.cfg.GetMasterNode(),
6439
      instance.primary_node,
6440
      ]
6441
    if self.op.remote_node is not None:
6442
      nl.append(self.op.remote_node)
6443
    return env, nl, nl
6444

    
6445

    
6446
class LUEvacuateNode(LogicalUnit):
6447
  """Relocate the secondary instances from a node.
6448

6449
  """
6450
  HPATH = "node-evacuate"
6451
  HTYPE = constants.HTYPE_NODE
6452
  _OP_REQP = ["node_name"]
6453
  REQ_BGL = False
6454

    
6455
  def CheckArguments(self):
6456
    if not hasattr(self.op, "remote_node"):
6457
      self.op.remote_node = None
6458
    if not hasattr(self.op, "iallocator"):
6459
      self.op.iallocator = None
6460
    if not hasattr(self.op, "early_release"):
6461
      self.op.early_release = False
6462

    
6463
    TLReplaceDisks.CheckArguments(constants.REPLACE_DISK_CHG,
6464
                                  self.op.remote_node,
6465
                                  self.op.iallocator)
6466

    
6467
  def ExpandNames(self):
6468
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6469

    
6470
    self.needed_locks = {}
6471

    
6472
    # Declare node locks
6473
    if self.op.iallocator is not None:
6474
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6475

    
6476
    elif self.op.remote_node is not None:
6477
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
6478

    
6479
      # Warning: do not remove the locking of the new secondary here
6480
      # unless DRBD8.AddChildren is changed to work in parallel;
6481
      # currently it doesn't since parallel invocations of
6482
      # FindUnusedMinor will conflict
6483
      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
6484
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6485

    
6486
    else:
6487
      raise errors.OpPrereqError("Invalid parameters", errors.ECODE_INVAL)
6488

    
6489
    # Create tasklets for replacing disks for all secondary instances on this
6490
    # node
6491
    names = []
6492
    tasklets = []
6493

    
6494
    for inst in _GetNodeSecondaryInstances(self.cfg, self.op.node_name):
6495
      logging.debug("Replacing disks for instance %s", inst.name)
6496
      names.append(inst.name)
6497

    
6498
      replacer = TLReplaceDisks(self, inst.name, constants.REPLACE_DISK_CHG,
6499
                                self.op.iallocator, self.op.remote_node, [],
6500
                                True, self.op.early_release)
6501
      tasklets.append(replacer)
6502

    
6503
    self.tasklets = tasklets
6504
    self.instance_names = names
6505

    
6506
    # Declare instance locks
6507
    self.needed_locks[locking.LEVEL_INSTANCE] = self.instance_names
6508

    
6509
  def DeclareLocks(self, level):
6510
    # If we're not already locking all nodes in the set we have to declare the
6511
    # instance's primary/secondary nodes.
6512
    if (level == locking.LEVEL_NODE and
6513
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
6514
      self._LockInstancesNodes()
6515

    
6516
  def BuildHooksEnv(self):
6517
    """Build hooks env.
6518

6519
    This runs on the master, the primary and all the secondaries.
6520

6521
    """
6522
    env = {
6523
      "NODE_NAME": self.op.node_name,
6524
      }
6525

    
6526
    nl = [self.cfg.GetMasterNode()]
6527

    
6528
    if self.op.remote_node is not None:
6529
      env["NEW_SECONDARY"] = self.op.remote_node
6530
      nl.append(self.op.remote_node)
6531

    
6532
    return (env, nl, nl)
6533

    
6534

    
6535
class TLReplaceDisks(Tasklet):
6536
  """Replaces disks for an instance.
6537

6538
  Note: Locking is not within the scope of this class.
6539

6540
  """
6541
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
6542
               disks, delay_iallocator, early_release):
6543
    """Initializes this class.
6544

6545
    """
6546
    Tasklet.__init__(self, lu)
6547

    
6548
    # Parameters
6549
    self.instance_name = instance_name
6550
    self.mode = mode
6551
    self.iallocator_name = iallocator_name
6552
    self.remote_node = remote_node
6553
    self.disks = disks
6554
    self.delay_iallocator = delay_iallocator
6555
    self.early_release = early_release
6556

    
6557
    # Runtime data
6558
    self.instance = None
6559
    self.new_node = None
6560
    self.target_node = None
6561
    self.other_node = None
6562
    self.remote_node_info = None
6563
    self.node_secondary_ip = None
6564

    
6565
  @staticmethod
6566
  def CheckArguments(mode, remote_node, iallocator):
6567
    """Helper function for users of this class.
6568

6569
    """
6570
    # check for valid parameter combination
6571
    if mode == constants.REPLACE_DISK_CHG:
6572
      if remote_node is None and iallocator is None:
6573
        raise errors.OpPrereqError("When changing the secondary either an"
6574
                                   " iallocator script must be used or the"
6575
                                   " new node given", errors.ECODE_INVAL)
6576

    
6577
      if remote_node is not None and iallocator is not None:
6578
        raise errors.OpPrereqError("Give either the iallocator or the new"
6579
                                   " secondary, not both", errors.ECODE_INVAL)
6580

    
6581
    elif remote_node is not None or iallocator is not None:
6582
      # Not replacing the secondary
6583
      raise errors.OpPrereqError("The iallocator and new node options can"
6584
                                 " only be used when changing the"
6585
                                 " secondary node", errors.ECODE_INVAL)
6586

    
6587
  @staticmethod
6588
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
6589
    """Compute a new secondary node using an IAllocator.
6590

6591
    """
6592
    ial = IAllocator(lu.cfg, lu.rpc,
6593
                     mode=constants.IALLOCATOR_MODE_RELOC,
6594
                     name=instance_name,
6595
                     relocate_from=relocate_from)
6596

    
6597
    ial.Run(iallocator_name)
6598

    
6599
    if not ial.success:
6600
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
6601
                                 " %s" % (iallocator_name, ial.info),
6602
                                 errors.ECODE_NORES)
6603

    
6604
    if len(ial.result) != ial.required_nodes:
6605
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
6606
                                 " of nodes (%s), required %s" %
6607
                                 (iallocator_name,
6608
                                  len(ial.result), ial.required_nodes),
6609
                                 errors.ECODE_FAULT)
6610

    
6611
    remote_node_name = ial.result[0]
6612

    
6613
    lu.LogInfo("Selected new secondary for instance '%s': %s",
6614
               instance_name, remote_node_name)
6615

    
6616
    return remote_node_name
6617

    
6618
  def _FindFaultyDisks(self, node_name):
6619
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
6620
                                    node_name, True)
6621

    
6622
  def CheckPrereq(self):
6623
    """Check prerequisites.
6624

6625
    This checks that the instance is in the cluster.
6626

6627
    """
6628
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
6629
    assert instance is not None, \
6630
      "Cannot retrieve locked instance %s" % self.instance_name
6631

    
6632
    if instance.disk_template != constants.DT_DRBD8:
6633
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
6634
                                 " instances", errors.ECODE_INVAL)
6635

    
6636
    if len(instance.secondary_nodes) != 1:
6637
      raise errors.OpPrereqError("The instance has a strange layout,"
6638
                                 " expected one secondary but found %d" %
6639
                                 len(instance.secondary_nodes),
6640
                                 errors.ECODE_FAULT)
6641

    
6642
    if not self.delay_iallocator:
6643
      self._CheckPrereq2()
6644

    
6645
  def _CheckPrereq2(self):
6646
    """Check prerequisites, second part.
6647

6648
    This function should always be part of CheckPrereq. It was separated and is
6649
    now called from Exec because during node evacuation iallocator was only
6650
    called with an unmodified cluster model, not taking planned changes into
6651
    account.
6652

6653
    """
6654
    instance = self.instance
6655
    secondary_node = instance.secondary_nodes[0]
6656

    
6657
    if self.iallocator_name is None:
6658
      remote_node = self.remote_node
6659
    else:
6660
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
6661
                                       instance.name, instance.secondary_nodes)
6662

    
6663
    if remote_node is not None:
6664
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
6665
      assert self.remote_node_info is not None, \
6666
        "Cannot retrieve locked node %s" % remote_node
6667
    else:
6668
      self.remote_node_info = None
6669

    
6670
    if remote_node == self.instance.primary_node:
6671
      raise errors.OpPrereqError("The specified node is the primary node of"
6672
                                 " the instance.", errors.ECODE_INVAL)
6673

    
6674
    if remote_node == secondary_node:
6675
      raise errors.OpPrereqError("The specified node is already the"
6676
                                 " secondary node of the instance.",
6677
                                 errors.ECODE_INVAL)
6678

    
6679
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
6680
                                    constants.REPLACE_DISK_CHG):
6681
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
6682
                                 errors.ECODE_INVAL)
6683

    
6684
    if self.mode == constants.REPLACE_DISK_AUTO:
6685
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
6686
      faulty_secondary = self._FindFaultyDisks(secondary_node)
6687

    
6688
      if faulty_primary and faulty_secondary:
6689
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
6690
                                   " one node and can not be repaired"
6691
                                   " automatically" % self.instance_name,
6692
                                   errors.ECODE_STATE)
6693

    
6694
      if faulty_primary:
6695
        self.disks = faulty_primary
6696
        self.target_node = instance.primary_node
6697
        self.other_node = secondary_node
6698
        check_nodes = [self.target_node, self.other_node]
6699
      elif faulty_secondary:
6700
        self.disks = faulty_secondary
6701
        self.target_node = secondary_node
6702
        self.other_node = instance.primary_node
6703
        check_nodes = [self.target_node, self.other_node]
6704
      else:
6705
        self.disks = []
6706
        check_nodes = []
6707

    
6708
    else:
6709
      # Non-automatic modes
6710
      if self.mode == constants.REPLACE_DISK_PRI:
6711
        self.target_node = instance.primary_node
6712
        self.other_node = secondary_node
6713
        check_nodes = [self.target_node, self.other_node]
6714

    
6715
      elif self.mode == constants.REPLACE_DISK_SEC:
6716
        self.target_node = secondary_node
6717
        self.other_node = instance.primary_node
6718
        check_nodes = [self.target_node, self.other_node]
6719

    
6720
      elif self.mode == constants.REPLACE_DISK_CHG:
6721
        self.new_node = remote_node
6722
        self.other_node = instance.primary_node
6723
        self.target_node = secondary_node
6724
        check_nodes = [self.new_node, self.other_node]
6725

    
6726
        _CheckNodeNotDrained(self.lu, remote_node)
6727

    
6728
        old_node_info = self.cfg.GetNodeInfo(secondary_node)
6729
        assert old_node_info is not None
6730
        if old_node_info.offline and not self.early_release:
6731
          # doesn't make sense to delay the release
6732
          self.early_release = True
6733
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
6734
                          " early-release mode", secondary_node)
6735

    
6736
      else:
6737
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
6738
                                     self.mode)
6739

    
6740
      # If not specified all disks should be replaced
6741
      if not self.disks:
6742
        self.disks = range(len(self.instance.disks))
6743

    
6744
    for node in check_nodes:
6745
      _CheckNodeOnline(self.lu, node)
6746

    
6747
    # Check whether disks are valid
6748
    for disk_idx in self.disks:
6749
      instance.FindDisk(disk_idx)
6750

    
6751
    # Get secondary node IP addresses
6752
    node_2nd_ip = {}
6753

    
6754
    for node_name in [self.target_node, self.other_node, self.new_node]:
6755
      if node_name is not None:
6756
        node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
6757

    
6758
    self.node_secondary_ip = node_2nd_ip
6759

    
6760
  def Exec(self, feedback_fn):
6761
    """Execute disk replacement.
6762

6763
    This dispatches the disk replacement to the appropriate handler.
6764

6765
    """
6766
    if self.delay_iallocator:
6767
      self._CheckPrereq2()
6768

    
6769
    if not self.disks:
6770
      feedback_fn("No disks need replacement")
6771
      return
6772

    
6773
    feedback_fn("Replacing disk(s) %s for %s" %
6774
                (utils.CommaJoin(self.disks), self.instance.name))
6775

    
6776
    activate_disks = (not self.instance.admin_up)
6777

    
6778
    # Activate the instance disks if we're replacing them on a down instance
6779
    if activate_disks:
6780
      _StartInstanceDisks(self.lu, self.instance, True)
6781

    
6782
    try:
6783
      # Should we replace the secondary node?
6784
      if self.new_node is not None:
6785
        fn = self._ExecDrbd8Secondary
6786
      else:
6787
        fn = self._ExecDrbd8DiskOnly
6788

    
6789
      return fn(feedback_fn)
6790

    
6791
    finally:
6792
      # Deactivate the instance disks if we're replacing them on a
6793
      # down instance
6794
      if activate_disks:
6795
        _SafeShutdownInstanceDisks(self.lu, self.instance)
6796

    
6797
  def _CheckVolumeGroup(self, nodes):
6798
    self.lu.LogInfo("Checking volume groups")
6799

    
6800
    vgname = self.cfg.GetVGName()
6801

    
6802
    # Make sure volume group exists on all involved nodes
6803
    results = self.rpc.call_vg_list(nodes)
6804
    if not results:
6805
      raise errors.OpExecError("Can't list volume groups on the nodes")
6806

    
6807
    for node in nodes:
6808
      res = results[node]
6809
      res.Raise("Error checking node %s" % node)
6810
      if vgname not in res.payload:
6811
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
6812
                                 (vgname, node))
6813

    
6814
  def _CheckDisksExistence(self, nodes):
6815
    # Check disk existence
6816
    for idx, dev in enumerate(self.instance.disks):
6817
      if idx not in self.disks:
6818
        continue
6819

    
6820
      for node in nodes:
6821
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
6822
        self.cfg.SetDiskID(dev, node)
6823

    
6824
        result = self.rpc.call_blockdev_find(node, dev)
6825

    
6826
        msg = result.fail_msg
6827
        if msg or not result.payload:
6828
          if not msg:
6829
            msg = "disk not found"
6830
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
6831
                                   (idx, node, msg))
6832

    
6833
  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
6834
    for idx, dev in enumerate(self.instance.disks):
6835
      if idx not in self.disks:
6836
        continue
6837

    
6838
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
6839
                      (idx, node_name))
6840

    
6841
      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
6842
                                   ldisk=ldisk):
6843
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
6844
                                 " replace disks for instance %s" %
6845
                                 (node_name, self.instance.name))
6846

    
6847
  def _CreateNewStorage(self, node_name):
6848
    vgname = self.cfg.GetVGName()
6849
    iv_names = {}
6850

    
6851
    for idx, dev in enumerate(self.instance.disks):
6852
      if idx not in self.disks:
6853
        continue
6854

    
6855
      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
6856

    
6857
      self.cfg.SetDiskID(dev, node_name)
6858

    
6859
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
6860
      names = _GenerateUniqueNames(self.lu, lv_names)
6861

    
6862
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
6863
                             logical_id=(vgname, names[0]))
6864
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
6865
                             logical_id=(vgname, names[1]))
6866

    
6867
      new_lvs = [lv_data, lv_meta]
6868
      old_lvs = dev.children
6869
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
6870

    
6871
      # we pass force_create=True to force the LVM creation
6872
      for new_lv in new_lvs:
6873
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
6874
                        _GetInstanceInfoText(self.instance), False)
6875

    
6876
    return iv_names
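
    # For disk/0 of a 10240 MB DRBD disk this allocates two fresh LVs on
    # node_name: a data LV of the same size as the disk and a 128 MB metadata
    # LV, named with a generated unique prefix plus the ".disk0_data" and
    # ".disk0_meta" suffixes built above (the size is an illustrative
    # example); iv_names then maps each DRBD device to its
    # (dev, old_lvs, new_lvs) tuple.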
6877

    
6878
  def _CheckDevices(self, node_name, iv_names):
6879
    for name, (dev, _, _) in iv_names.iteritems():
6880
      self.cfg.SetDiskID(dev, node_name)
6881

    
6882
      result = self.rpc.call_blockdev_find(node_name, dev)
6883

    
6884
      msg = result.fail_msg
6885
      if msg or not result.payload:
6886
        if not msg:
6887
          msg = "disk not found"
6888
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
6889
                                 (name, msg))
6890

    
6891
      if result.payload.is_degraded:
6892
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
6893

    
6894
  def _RemoveOldStorage(self, node_name, iv_names):
6895
    for name, (_, old_lvs, _) in iv_names.iteritems():
6896
      self.lu.LogInfo("Remove logical volumes for %s" % name)
6897

    
6898
      for lv in old_lvs:
6899
        self.cfg.SetDiskID(lv, node_name)
6900

    
6901
        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
6902
        if msg:
6903
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
6904
                             hint="remove unused LVs manually")
6905

    
6906
  def _ReleaseNodeLock(self, node_name):
6907
    """Releases the lock for a given node."""
6908
    self.lu.context.glm.release(locking.LEVEL_NODE, node_name)
6909

    
6910
  def _ExecDrbd8DiskOnly(self, feedback_fn):
6911
    """Replace a disk on the primary or secondary for DRBD 8.
6912

6913
    The algorithm for replace is quite complicated:
6914

6915
      1. for each disk to be replaced:
6916

6917
        1. create new LVs on the target node with unique names
6918
        1. detach old LVs from the drbd device
6919
        1. rename old LVs to name_replaced-<time_t>
6920
        1. rename new LVs to old LVs
6921
        1. attach the new LVs (with the old names now) to the drbd device
6922

6923
      1. wait for sync across all devices
6924

6925
      1. for each modified disk:
6926

6927
        1. remove old LVs (which have the name name_replaced-<time_t>)
6928

6929
    Failures are not very well handled.
6930

6931
    """
6932
    steps_total = 6
6933

    
6934
    # Step: check device activation
6935
    self.lu.LogStep(1, steps_total, "Check device existence")
6936
    self._CheckDisksExistence([self.other_node, self.target_node])
6937
    self._CheckVolumeGroup([self.target_node, self.other_node])
6938

    
6939
    # Step: check other node consistency
6940
    self.lu.LogStep(2, steps_total, "Check peer consistency")
6941
    self._CheckDisksConsistency(self.other_node,
6942
                                self.other_node == self.instance.primary_node,
6943
                                False)
6944

    
6945
    # Step: create new storage
6946
    self.lu.LogStep(3, steps_total, "Allocate new storage")
6947
    iv_names = self._CreateNewStorage(self.target_node)
6948

    
6949
    # Step: for each lv, detach+rename*2+attach
6950
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
6951
    for dev, old_lvs, new_lvs in iv_names.itervalues():
6952
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
6953

    
6954
      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
6955
                                                     old_lvs)
6956
      result.Raise("Can't detach drbd from local storage on node"
6957
                   " %s for device %s" % (self.target_node, dev.iv_name))
6958
      #dev.children = []
6959
      #cfg.Update(instance)
6960

    
6961
      # ok, we created the new LVs, so now we know we have the needed
6962
      # storage; as such, we proceed on the target node to rename
6963
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
6964
      # using the assumption that logical_id == physical_id (which in
6965
      # turn is the unique_id on that node)
6966

    
6967
      # FIXME(iustin): use a better name for the replaced LVs
6968
      temp_suffix = int(time.time())
6969
      ren_fn = lambda d, suff: (d.physical_id[0],
6970
                                d.physical_id[1] + "_replaced-%s" % suff)
6971

    
6972
      # Build the rename list based on what LVs exist on the node
6973
      rename_old_to_new = []
6974
      for to_ren in old_lvs:
6975
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
6976
        if not result.fail_msg and result.payload:
6977
          # device exists
6978
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
6979

    
6980
      self.lu.LogInfo("Renaming the old LVs on the target node")
6981
      result = self.rpc.call_blockdev_rename(self.target_node,
6982
                                             rename_old_to_new)
6983
      result.Raise("Can't rename old LVs on node %s" % self.target_node)
6984

    
6985
      # Now we rename the new LVs to the old LVs
6986
      self.lu.LogInfo("Renaming the new LVs on the target node")
6987
      rename_new_to_old = [(new, old.physical_id)
6988
                           for old, new in zip(old_lvs, new_lvs)]
6989
      result = self.rpc.call_blockdev_rename(self.target_node,
6990
                                             rename_new_to_old)
6991
      result.Raise("Can't rename new LVs on node %s" % self.target_node)
6992

    
6993
      for old, new in zip(old_lvs, new_lvs):
6994
        new.logical_id = old.logical_id
6995
        self.cfg.SetDiskID(new, self.target_node)
6996

    
6997
      for disk in old_lvs:
6998
        disk.logical_id = ren_fn(disk, temp_suffix)
6999
        self.cfg.SetDiskID(disk, self.target_node)
7000

    
7001
      # Now that the new lvs have the old name, we can add them to the device
7002
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
7003
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
7004
                                                  new_lvs)
7005
      msg = result.fail_msg
7006
      if msg:
7007
        for new_lv in new_lvs:
7008
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
7009
                                               new_lv).fail_msg
7010
          if msg2:
7011
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
7012
                               hint=("cleanup manually the unused logical"
7013
                                     "volumes"))
7014
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
7015

    
7016
      dev.children = new_lvs
7017

    
7018
      self.cfg.Update(self.instance, feedback_fn)
7019

    
7020
    cstep = 5
7021
    if self.early_release:
7022
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7023
      cstep += 1
7024
      self._RemoveOldStorage(self.target_node, iv_names)
7025
      # WARNING: we release both node locks here, do not do other RPCs
7026
      # than WaitForSync to the primary node
7027
      self._ReleaseNodeLock([self.target_node, self.other_node])
7028

    
7029
    # Wait for sync
7030
    # This can fail as the old devices are degraded and _WaitForSync
7031
    # does a combined result over all disks, so we don't check its return value
7032
    self.lu.LogStep(cstep, steps_total, "Sync devices")
7033
    cstep += 1
7034
    _WaitForSync(self.lu, self.instance)
7035

    
7036
    # Check all devices manually
7037
    self._CheckDevices(self.instance.primary_node, iv_names)
7038

    
7039
    # Step: remove old storage
7040
    if not self.early_release:
7041
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7042
      cstep += 1
7043
      self._RemoveOldStorage(self.target_node, iv_names)
7044

    
7045
  def _ExecDrbd8Secondary(self, feedback_fn):
7046
    """Replace the secondary node for DRBD 8.
7047

7048
    The algorithm for replace is quite complicated:
7049
      - for all disks of the instance:
7050
        - create new LVs on the new node with same names
7051
        - shutdown the drbd device on the old secondary
7052
        - disconnect the drbd network on the primary
7053
        - create the drbd device on the new secondary
7054
        - network attach the drbd on the primary, using an artifice:
7055
          the drbd code for Attach() will connect to the network if it
7056
          finds a device which is connected to the good local disks but
7057
          not network enabled
7058
      - wait for sync across all devices
7059
      - remove all disks from the old secondary
7060

7061
    Failures are not very well handled.
7062

7063
    """
7064
    steps_total = 6
7065

    
7066
    # Step: check device activation
7067
    self.lu.LogStep(1, steps_total, "Check device existence")
7068
    self._CheckDisksExistence([self.instance.primary_node])
7069
    self._CheckVolumeGroup([self.instance.primary_node])
7070

    
7071
    # Step: check other node consistency
7072
    self.lu.LogStep(2, steps_total, "Check peer consistency")
7073
    self._CheckDisksConsistency(self.instance.primary_node, True, True)
7074

    
7075
    # Step: create new storage
7076
    self.lu.LogStep(3, steps_total, "Allocate new storage")
7077
    for idx, dev in enumerate(self.instance.disks):
7078
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
7079
                      (self.new_node, idx))
7080
      # we pass force_create=True to force LVM creation
7081
      for new_lv in dev.children:
7082
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
7083
                        _GetInstanceInfoText(self.instance), False)
7084

    
7085
    # Step 4: dbrd minors and drbd setups changes
7086
    # after this, we must manually remove the drbd minors on both the
7087
    # error and the success paths
7088
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
7089
    minors = self.cfg.AllocateDRBDMinor([self.new_node
7090
                                         for dev in self.instance.disks],
7091
                                        self.instance.name)
7092
    logging.debug("Allocated minors %r", minors)
7093

    
7094
    iv_names = {}
7095
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
7096
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
7097
                      (self.new_node, idx))
7098
      # create new devices on new_node; note that we create two IDs:
7099
      # one without port, so the drbd will be activated without
7100
      # networking information on the new node at this stage, and one
7101
      # with network, for the latter activation in step 4
7102
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
7103
      if self.instance.primary_node == o_node1:
7104
        p_minor = o_minor1
7105
      else:
7106
        assert self.instance.primary_node == o_node2, "Three-node instance?"
7107
        p_minor = o_minor2
7108

    
7109
      new_alone_id = (self.instance.primary_node, self.new_node, None,
7110
                      p_minor, new_minor, o_secret)
7111
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
7112
                    p_minor, new_minor, o_secret)
7113

    
7114
      iv_names[idx] = (dev, dev.children, new_net_id)
7115
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
7116
                    new_net_id)
7117
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
7118
                              logical_id=new_alone_id,
7119
                              children=dev.children,
7120
                              size=dev.size)
7121
      try:
7122
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
7123
                              _GetInstanceInfoText(self.instance), False)
7124
      except errors.GenericError:
7125
        self.cfg.ReleaseDRBDMinors(self.instance.name)
7126
        raise
7127

    
7128
    # We have new devices, shutdown the drbd on the old secondary
7129
    for idx, dev in enumerate(self.instance.disks):
7130
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
7131
      self.cfg.SetDiskID(dev, self.target_node)
7132
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
7133
      if msg:
7134
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
7135
                           "node: %s" % (idx, msg),
7136
                           hint=("Please cleanup this device manually as"
7137
                                 " soon as possible"))
7138

    
7139
    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
7140
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
7141
                                               self.node_secondary_ip,
7142
                                               self.instance.disks)\
7143
                                              [self.instance.primary_node]
7144

    
7145
    msg = result.fail_msg
7146
    if msg:
7147
      # detaches didn't succeed (unlikely)
7148
      self.cfg.ReleaseDRBDMinors(self.instance.name)
7149
      raise errors.OpExecError("Can't detach the disks from the network on"
7150
                               " old node: %s" % (msg,))
7151

    
7152
    # if we managed to detach at least one, we update all the disks of
7153
    # the instance to point to the new secondary
7154
    self.lu.LogInfo("Updating instance configuration")
7155
    for dev, _, new_logical_id in iv_names.itervalues():
7156
      dev.logical_id = new_logical_id
7157
      self.cfg.SetDiskID(dev, self.instance.primary_node)
7158

    
7159
    self.cfg.Update(self.instance, feedback_fn)
7160

    
7161
    # and now perform the drbd attach
7162
    self.lu.LogInfo("Attaching primary drbds to new secondary"
7163
                    " (standalone => connected)")
7164
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
7165
                                            self.new_node],
7166
                                           self.node_secondary_ip,
7167
                                           self.instance.disks,
7168
                                           self.instance.name,
7169
                                           False)
7170
    for to_node, to_result in result.items():
7171
      msg = to_result.fail_msg
7172
      if msg:
7173
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
7174
                           to_node, msg,
7175
                           hint=("please do a gnt-instance info to see the"
7176
                                 " status of disks"))
7177
    cstep = 5
7178
    if self.early_release:
7179
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7180
      cstep += 1
7181
      self._RemoveOldStorage(self.target_node, iv_names)
7182
      # WARNING: we release all node locks here, do not do other RPCs
7183
      # than WaitForSync to the primary node
7184
      self._ReleaseNodeLock([self.instance.primary_node,
7185
                             self.target_node,
7186
                             self.new_node])
7187

    
7188
    # Wait for sync
7189
    # This can fail as the old devices are degraded and _WaitForSync
7190
    # does a combined result over all disks, so we don't check its return value
7191
    self.lu.LogStep(cstep, steps_total, "Sync devices")
7192
    cstep += 1
7193
    _WaitForSync(self.lu, self.instance)
7194

    
7195
    # Check all devices manually
7196
    self._CheckDevices(self.instance.primary_node, iv_names)
7197

    
7198
    # Step: remove old storage
7199
    if not self.early_release:
7200
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7201
      self._RemoveOldStorage(self.target_node, iv_names)
7202

    
7203

    
7204
class LURepairNodeStorage(NoHooksLU):
7205
  """Repairs the volume group on a node.
7206

7207
  """
7208
  _OP_REQP = ["node_name"]
7209
  REQ_BGL = False
7210

    
7211
  def CheckArguments(self):
7212
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
7213

    
7214
  def ExpandNames(self):
7215
    self.needed_locks = {
7216
      locking.LEVEL_NODE: [self.op.node_name],
7217
      }
7218

    
7219
  def _CheckFaultyDisks(self, instance, node_name):
7220
    """Ensure faulty disks abort the opcode or at least warn."""
7221
    try:
7222
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
7223
                                  node_name, True):
7224
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
7225
                                   " node '%s'" % (instance.name, node_name),
7226
                                   errors.ECODE_STATE)
7227
    except errors.OpPrereqError, err:
7228
      if self.op.ignore_consistency:
7229
        self.proc.LogWarning(str(err.args[0]))
7230
      else:
7231
        raise
7232

    
7233
  def CheckPrereq(self):
7234
    """Check prerequisites.
7235

7236
    """
7237
    storage_type = self.op.storage_type
7238

    
7239
    if (constants.SO_FIX_CONSISTENCY not in
7240
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
7241
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
7242
                                 " repaired" % storage_type,
7243
                                 errors.ECODE_INVAL)
7244

    
7245
    # Check whether any instance on this node has faulty disks
7246
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
7247
      if not inst.admin_up:
7248
        continue
7249
      check_nodes = set(inst.all_nodes)
7250
      check_nodes.discard(self.op.node_name)
7251
      for inst_node_name in check_nodes:
7252
        self._CheckFaultyDisks(inst, inst_node_name)
7253

    
7254
  def Exec(self, feedback_fn):
7255
    feedback_fn("Repairing storage unit '%s' on %s ..." %
7256
                (self.op.name, self.op.node_name))
7257

    
7258
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
7259
    result = self.rpc.call_storage_execute(self.op.node_name,
7260
                                           self.op.storage_type, st_args,
7261
                                           self.op.name,
7262
                                           constants.SO_FIX_CONSISTENCY)
7263
    result.Raise("Failed to repair storage unit '%s' on %s" %
7264
                 (self.op.name, self.op.node_name))
7265

    
7266

    
7267
class LUNodeEvacuationStrategy(NoHooksLU):
7268
  """Computes the node evacuation strategy.
7269

7270
  """
7271
  _OP_REQP = ["nodes"]
7272
  REQ_BGL = False
7273

    
7274
  def CheckArguments(self):
7275
    if not hasattr(self.op, "remote_node"):
7276
      self.op.remote_node = None
7277
    if not hasattr(self.op, "iallocator"):
7278
      self.op.iallocator = None
7279
    if self.op.remote_node is not None and self.op.iallocator is not None:
7280
      raise errors.OpPrereqError("Give either the iallocator or the new"
7281
                                 " secondary, not both", errors.ECODE_INVAL)
7282

    
7283
  def ExpandNames(self):
7284
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
7285
    self.needed_locks = locks = {}
7286
    if self.op.remote_node is None:
7287
      locks[locking.LEVEL_NODE] = locking.ALL_SET
7288
    else:
7289
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
7290
      locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]
7291

    
7292
  def CheckPrereq(self):
7293
    pass
7294

    
7295
  def Exec(self, feedback_fn):
7296
    if self.op.remote_node is not None:
7297
      instances = []
7298
      for node in self.op.nodes:
7299
        instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
7300
      result = []
7301
      for i in instances:
7302
        if i.primary_node == self.op.remote_node:
7303
          raise errors.OpPrereqError("Node %s is the primary node of"
7304
                                     " instance %s, cannot use it as"
7305
                                     " secondary" %
7306
                                     (self.op.remote_node, i.name),
7307
                                     errors.ECODE_INVAL)
7308
        result.append([i.name, self.op.remote_node])
7309
    else:
7310
      ial = IAllocator(self.cfg, self.rpc,
7311
                       mode=constants.IALLOCATOR_MODE_MEVAC,
7312
                       evac_nodes=self.op.nodes)
7313
      ial.Run(self.op.iallocator, validate=True)
7314
      if not ial.success:
7315
        raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
7316
                                 errors.ECODE_NORES)
7317
      result = ial.result
7318
    return result
7319

    
7320

    
7321
class LUGrowDisk(LogicalUnit):
7322
  """Grow a disk of an instance.
7323

7324
  """
7325
  HPATH = "disk-grow"
7326
  HTYPE = constants.HTYPE_INSTANCE
7327
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
7328
  REQ_BGL = False
7329

    
7330
  def ExpandNames(self):
7331
    self._ExpandAndLockInstance()
7332
    self.needed_locks[locking.LEVEL_NODE] = []
7333
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7334

    
7335
  def DeclareLocks(self, level):
7336
    if level == locking.LEVEL_NODE:
7337
      self._LockInstancesNodes()
7338

    
7339
  def BuildHooksEnv(self):
7340
    """Build hooks env.
7341

7342
    This runs on the master, the primary and all the secondaries.
7343

7344
    """
7345
    env = {
7346
      "DISK": self.op.disk,
7347
      "AMOUNT": self.op.amount,
7348
      }
7349
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
7350
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7351
    return env, nl, nl
7352

    
7353
  def CheckPrereq(self):
7354
    """Check prerequisites.
7355

7356
    This checks that the instance is in the cluster.
7357

7358
    """
7359
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7360
    assert instance is not None, \
7361
      "Cannot retrieve locked instance %s" % self.op.instance_name
7362
    nodenames = list(instance.all_nodes)
7363
    for node in nodenames:
7364
      _CheckNodeOnline(self, node)
7365

    
7366

    
7367
    self.instance = instance
7368

    
7369
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
7370
      raise errors.OpPrereqError("Instance's disk layout does not support"
7371
                                 " growing.", errors.ECODE_INVAL)
7372

    
7373
    self.disk = instance.FindDisk(self.op.disk)
7374

    
7375
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
7376
                                       instance.hypervisor)
7377
    for node in nodenames:
7378
      info = nodeinfo[node]
7379
      info.Raise("Cannot get current information from node %s" % node)
7380
      vg_free = info.payload.get('vg_free', None)
7381
      if not isinstance(vg_free, int):
7382
        raise errors.OpPrereqError("Can't compute free disk space on"
7383
                                   " node %s" % node, errors.ECODE_ENVIRON)
7384
      if self.op.amount > vg_free:
7385
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
7386
                                   " %d MiB available, %d MiB required" %
7387
                                   (node, vg_free, self.op.amount),
7388
                                   errors.ECODE_NORES)
7389

    
7390
  def Exec(self, feedback_fn):
7391
    """Execute disk grow.
7392

7393
    """
7394
    instance = self.instance
7395
    disk = self.disk
7396
    for node in instance.all_nodes:
7397
      self.cfg.SetDiskID(disk, node)
7398
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
7399
      result.Raise("Grow request failed to node %s" % node)
7400

    
7401
      # TODO: Rewrite code to work properly
7402
      # DRBD goes into sync mode for a short amount of time after executing the
7403
      # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
7404
      # calling "resize" in sync mode fails. Sleeping for a short amount of
7405
      # time is a work-around.
7406
      time.sleep(5)
7407

    
7408
    disk.RecordGrow(self.op.amount)
7409
    self.cfg.Update(instance, feedback_fn)
7410
    if self.op.wait_for_sync:
7411
      disk_abort = not _WaitForSync(self, instance)
7412
      if disk_abort:
7413
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
7414
                             " status.\nPlease check the instance.")
7415

    
7416

    
7417
class LUQueryInstanceData(NoHooksLU):
7418
  """Query runtime instance data.
7419

7420
  """
7421
  _OP_REQP = ["instances", "static"]
7422
  REQ_BGL = False
7423

    
7424
  def ExpandNames(self):
7425
    self.needed_locks = {}
7426
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
7427

    
7428
    if not isinstance(self.op.instances, list):
7429
      raise errors.OpPrereqError("Invalid argument type 'instances'",
7430
                                 errors.ECODE_INVAL)
7431

    
7432
    if self.op.instances:
7433
      self.wanted_names = []
7434
      for name in self.op.instances:
7435
        full_name = _ExpandInstanceName(self.cfg, name)
7436
        self.wanted_names.append(full_name)
7437
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
7438
    else:
7439
      self.wanted_names = None
7440
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
7441

    
7442
    self.needed_locks[locking.LEVEL_NODE] = []
7443
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7444

    
7445
  def DeclareLocks(self, level):
7446
    if level == locking.LEVEL_NODE:
7447
      self._LockInstancesNodes()
7448

    
7449
  def CheckPrereq(self):
7450
    """Check prerequisites.
7451

7452
    This only checks the optional instance list against the existing names.
7453

7454
    """
7455
    if self.wanted_names is None:
7456
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
7457

    
7458
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
7459
                             in self.wanted_names]
7460
    return
7461

    
7462
  def _ComputeBlockdevStatus(self, node, instance_name, dev):
7463
    """Returns the status of a block device
7464

7465
    """
7466
    if self.op.static or not node:
7467
      return None
7468

    
7469
    self.cfg.SetDiskID(dev, node)
7470

    
7471
    result = self.rpc.call_blockdev_find(node, dev)
7472
    if result.offline:
7473
      return None
7474

    
7475
    result.Raise("Can't compute disk status for %s" % instance_name)
7476

    
7477
    status = result.payload
7478
    if status is None:
7479
      return None
7480

    
7481
    return (status.dev_path, status.major, status.minor,
7482
            status.sync_percent, status.estimated_time,
7483
            status.is_degraded, status.ldisk_status)
7484

    
7485
  def _ComputeDiskStatus(self, instance, snode, dev):
7486
    """Compute block device status.
7487

7488
    """
7489
    if dev.dev_type in constants.LDS_DRBD:
7490
      # we change the snode then (otherwise we use the one passed in)
7491
      if dev.logical_id[0] == instance.primary_node:
7492
        snode = dev.logical_id[1]
7493
      else:
7494
        snode = dev.logical_id[0]
7495

    
7496
    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
7497
                                              instance.name, dev)
7498
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
7499

    
7500
    if dev.children:
7501
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
7502
                      for child in dev.children]
7503
    else:
7504
      dev_children = []
7505

    
7506
    data = {
7507
      "iv_name": dev.iv_name,
7508
      "dev_type": dev.dev_type,
7509
      "logical_id": dev.logical_id,
7510
      "physical_id": dev.physical_id,
7511
      "pstatus": dev_pstatus,
7512
      "sstatus": dev_sstatus,
7513
      "children": dev_children,
7514
      "mode": dev.mode,
7515
      "size": dev.size,
7516
      }
7517

    
7518
    return data
7519

    
7520
  def Exec(self, feedback_fn):
7521
    """Gather and return data"""
7522
    result = {}
7523

    
7524
    cluster = self.cfg.GetClusterInfo()
7525

    
7526
    for instance in self.wanted_instances:
7527
      if not self.op.static:
7528
        remote_info = self.rpc.call_instance_info(instance.primary_node,
7529
                                                  instance.name,
7530
                                                  instance.hypervisor)
7531
        remote_info.Raise("Error checking node %s" % instance.primary_node)
7532
        remote_info = remote_info.payload
7533
        if remote_info and "state" in remote_info:
7534
          remote_state = "up"
7535
        else:
7536
          remote_state = "down"
7537
      else:
7538
        remote_state = None
7539
      if instance.admin_up:
7540
        config_state = "up"
7541
      else:
7542
        config_state = "down"
7543

    
7544
      disks = [self._ComputeDiskStatus(instance, None, device)
7545
               for device in instance.disks]
7546

    
7547
      idict = {
7548
        "name": instance.name,
7549
        "config_state": config_state,
7550
        "run_state": remote_state,
7551
        "pnode": instance.primary_node,
7552
        "snodes": instance.secondary_nodes,
7553
        "os": instance.os,
7554
        # this happens to be the same format used for hooks
7555
        "nics": _NICListToTuple(self, instance.nics),
7556
        "disks": disks,
7557
        "hypervisor": instance.hypervisor,
7558
        "network_port": instance.network_port,
7559
        "hv_instance": instance.hvparams,
7560
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
7561
        "be_instance": instance.beparams,
7562
        "be_actual": cluster.FillBE(instance),
7563
        "serial_no": instance.serial_no,
7564
        "mtime": instance.mtime,
7565
        "ctime": instance.ctime,
7566
        "uuid": instance.uuid,
7567
        }
7568

    
7569
      result[instance.name] = idict
7570

    
7571
    return result
7572

    
7573

    
7574
class LUSetInstanceParams(LogicalUnit):
7575
  """Modifies an instances's parameters.
7576

7577
  """
7578
  HPATH = "instance-modify"
7579
  HTYPE = constants.HTYPE_INSTANCE
7580
  _OP_REQP = ["instance_name"]
7581
  REQ_BGL = False
7582

    
7583
  def CheckArguments(self):
7584
    if not hasattr(self.op, 'nics'):
7585
      self.op.nics = []
7586
    if not hasattr(self.op, 'disks'):
7587
      self.op.disks = []
7588
    if not hasattr(self.op, 'beparams'):
7589
      self.op.beparams = {}
7590
    if not hasattr(self.op, 'hvparams'):
7591
      self.op.hvparams = {}
7592
    self.op.force = getattr(self.op, "force", False)
7593
    if not (self.op.nics or self.op.disks or
7594
            self.op.hvparams or self.op.beparams):
7595
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
7596

    
7597
    if self.op.hvparams:
7598
      _CheckGlobalHvParams(self.op.hvparams)
7599

    
7600
    # Disk validation
7601
    disk_addremove = 0
7602
    for disk_op, disk_dict in self.op.disks:
7603
      if disk_op == constants.DDM_REMOVE:
7604
        disk_addremove += 1
7605
        continue
7606
      elif disk_op == constants.DDM_ADD:
7607
        disk_addremove += 1
7608
      else:
7609
        if not isinstance(disk_op, int):
7610
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
7611
        if not isinstance(disk_dict, dict):
7612
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
7613
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
7614

    
7615
      if disk_op == constants.DDM_ADD:
7616
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
7617
        if mode not in constants.DISK_ACCESS_SET:
7618
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
7619
                                     errors.ECODE_INVAL)
7620
        size = disk_dict.get('size', None)
7621
        if size is None:
7622
          raise errors.OpPrereqError("Required disk parameter size missing",
7623
                                     errors.ECODE_INVAL)
7624
        try:
7625
          size = int(size)
7626
        except (TypeError, ValueError), err:
7627
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
7628
                                     str(err), errors.ECODE_INVAL)
7629
        disk_dict['size'] = size
7630
      else:
7631
        # modification of disk
7632
        if 'size' in disk_dict:
7633
          raise errors.OpPrereqError("Disk size change not possible, use"
7634
                                     " grow-disk", errors.ECODE_INVAL)
7635

    
7636
    if disk_addremove > 1:
7637
      raise errors.OpPrereqError("Only one disk add or remove operation"
7638
                                 " supported at a time", errors.ECODE_INVAL)
7639

    
7640
    # NIC validation
7641
    nic_addremove = 0
7642
    for nic_op, nic_dict in self.op.nics:
7643
      if nic_op == constants.DDM_REMOVE:
7644
        nic_addremove += 1
7645
        continue
7646
      elif nic_op == constants.DDM_ADD:
7647
        nic_addremove += 1
7648
      else:
7649
        if not isinstance(nic_op, int):
7650
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
7651
        if not isinstance(nic_dict, dict):
7652
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
7653
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
7654

    
7655
      # nic_dict should be a dict
7656
      nic_ip = nic_dict.get('ip', None)
7657
      if nic_ip is not None:
7658
        if nic_ip.lower() == constants.VALUE_NONE:
7659
          nic_dict['ip'] = None
7660
        else:
7661
          if not utils.IsValidIP(nic_ip):
7662
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
7663
                                       errors.ECODE_INVAL)
7664

    
7665
      nic_bridge = nic_dict.get('bridge', None)
7666
      nic_link = nic_dict.get('link', None)
7667
      if nic_bridge and nic_link:
7668
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
7669
                                   " at the same time", errors.ECODE_INVAL)
7670
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
7671
        nic_dict['bridge'] = None
7672
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
7673
        nic_dict['link'] = None
7674

    
7675
      if nic_op == constants.DDM_ADD:
7676
        nic_mac = nic_dict.get('mac', None)
7677
        if nic_mac is None:
7678
          nic_dict['mac'] = constants.VALUE_AUTO
7679

    
7680
      if 'mac' in nic_dict:
7681
        nic_mac = nic_dict['mac']
7682
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7683
          nic_mac = utils.NormalizeAndValidateMac(nic_mac)
7684

    
7685
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
7686
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
7687
                                     " modifying an existing nic",
7688
                                     errors.ECODE_INVAL)
7689

    
7690
    if nic_addremove > 1:
7691
      raise errors.OpPrereqError("Only one NIC add or remove operation"
7692
                                 " supported at a time", errors.ECODE_INVAL)
7693

    
7694
  def ExpandNames(self):
7695
    self._ExpandAndLockInstance()
7696
    self.needed_locks[locking.LEVEL_NODE] = []
7697
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7698

    
7699
  def DeclareLocks(self, level):
7700
    if level == locking.LEVEL_NODE:
7701
      self._LockInstancesNodes()
7702

    
7703
  def BuildHooksEnv(self):
7704
    """Build hooks env.
7705

7706
    This runs on the master, primary and secondaries.
7707

7708
    """
7709
    args = dict()
7710
    if constants.BE_MEMORY in self.be_new:
7711
      args['memory'] = self.be_new[constants.BE_MEMORY]
7712
    if constants.BE_VCPUS in self.be_new:
7713
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
7714
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
7715
    # information at all.
7716
    if self.op.nics:
7717
      args['nics'] = []
7718
      nic_override = dict(self.op.nics)
7719
      c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
7720
      for idx, nic in enumerate(self.instance.nics):
7721
        if idx in nic_override:
7722
          this_nic_override = nic_override[idx]
7723
        else:
7724
          this_nic_override = {}
7725
        if 'ip' in this_nic_override:
7726
          ip = this_nic_override['ip']
7727
        else:
7728
          ip = nic.ip
7729
        if 'mac' in this_nic_override:
7730
          mac = this_nic_override['mac']
7731
        else:
7732
          mac = nic.mac
7733
        if idx in self.nic_pnew:
7734
          nicparams = self.nic_pnew[idx]
7735
        else:
7736
          nicparams = objects.FillDict(c_nicparams, nic.nicparams)
7737
        mode = nicparams[constants.NIC_MODE]
7738
        link = nicparams[constants.NIC_LINK]
7739
        args['nics'].append((ip, mac, mode, link))
7740
      if constants.DDM_ADD in nic_override:
7741
        ip = nic_override[constants.DDM_ADD].get('ip', None)
7742
        mac = nic_override[constants.DDM_ADD]['mac']
7743
        nicparams = self.nic_pnew[constants.DDM_ADD]
7744
        mode = nicparams[constants.NIC_MODE]
7745
        link = nicparams[constants.NIC_LINK]
7746
        args['nics'].append((ip, mac, mode, link))
7747
      elif constants.DDM_REMOVE in nic_override:
7748
        del args['nics'][-1]
7749

    
7750
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
7751
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7752
    return env, nl, nl
7753

    
7754
  @staticmethod
7755
  def _GetUpdatedParams(old_params, update_dict,
7756
                        default_values, parameter_types):
7757
    """Return the new params dict for the given params.
7758

7759
    @type old_params: dict
7760
    @param old_params: old parameters
7761
    @type update_dict: dict
7762
    @param update_dict: dict containing new parameter values,
7763
                        or constants.VALUE_DEFAULT to reset the
7764
                        parameter to its default value
7765
    @type default_values: dict
7766
    @param default_values: default values for the filled parameters
7767
    @type parameter_types: dict
7768
    @param parameter_types: dict mapping target dict keys to types
7769
                            in constants.ENFORCEABLE_TYPES
7770
    @rtype: (dict, dict)
7771
    @return: (new_parameters, filled_parameters)
7772

7773
    """
7774
    params_copy = copy.deepcopy(old_params)
7775
    for key, val in update_dict.iteritems():
7776
      if val == constants.VALUE_DEFAULT:
7777
        try:
7778
          del params_copy[key]
7779
        except KeyError:
7780
          pass
7781
      else:
7782
        params_copy[key] = val
7783
    utils.ForceDictType(params_copy, parameter_types)
7784
    params_filled = objects.FillDict(default_values, params_copy)
7785
    return (params_copy, params_filled)
7786

    
7787
  def CheckPrereq(self):
7788
    """Check prerequisites.
7789

7790
    This only checks the instance list against the existing names.
7791

7792
    """
7793
    self.force = self.op.force
7794

    
7795
    # checking the new params on the primary/secondary nodes
7796

    
7797
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7798
    cluster = self.cluster = self.cfg.GetClusterInfo()
7799
    assert self.instance is not None, \
7800
      "Cannot retrieve locked instance %s" % self.op.instance_name
7801
    pnode = instance.primary_node
7802
    nodelist = list(instance.all_nodes)
7803

    
7804
    # hvparams processing
7805
    if self.op.hvparams:
7806
      i_hvdict, hv_new = self._GetUpdatedParams(
7807
                             instance.hvparams, self.op.hvparams,
7808
                             cluster.hvparams[instance.hypervisor],
7809
                             constants.HVS_PARAMETER_TYPES)
7810
      # local check
7811
      hypervisor.GetHypervisor(
7812
        instance.hypervisor).CheckParameterSyntax(hv_new)
7813
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
7814
      self.hv_new = hv_new # the new actual values
7815
      self.hv_inst = i_hvdict # the new dict (without defaults)
7816
    else:
7817
      self.hv_new = self.hv_inst = {}
7818

    
7819
    # beparams processing
7820
    if self.op.beparams:
7821
      i_bedict, be_new = self._GetUpdatedParams(
7822
                             instance.beparams, self.op.beparams,
7823
                             cluster.beparams[constants.PP_DEFAULT],
7824
                             constants.BES_PARAMETER_TYPES)
7825
      self.be_new = be_new # the new actual values
7826
      self.be_inst = i_bedict # the new dict (without defaults)
7827
    else:
7828
      self.be_new = self.be_inst = {}
7829

    
7830
    self.warn = []
7831

    
7832
    if constants.BE_MEMORY in self.op.beparams and not self.force:
7833
      mem_check_list = [pnode]
7834
      if be_new[constants.BE_AUTO_BALANCE]:
7835
        # either we changed auto_balance to yes or it was from before
7836
        mem_check_list.extend(instance.secondary_nodes)
7837
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
7838
                                                  instance.hypervisor)
7839
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
7840
                                         instance.hypervisor)
7841
      pninfo = nodeinfo[pnode]
7842
      msg = pninfo.fail_msg
7843
      if msg:
7844
        # Assume the primary node is unreachable and go ahead
7845
        self.warn.append("Can't get info from primary node %s: %s" %
7846
                         (pnode,  msg))
7847
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
7848
        self.warn.append("Node data from primary node %s doesn't contain"
7849
                         " free memory information" % pnode)
7850
      elif instance_info.fail_msg:
7851
        self.warn.append("Can't get instance runtime information: %s" %
7852
                        instance_info.fail_msg)
7853
      else:
7854
        if instance_info.payload:
7855
          current_mem = int(instance_info.payload['memory'])
7856
        else:
7857
          # Assume instance not running
7858
          # (there is a slight race condition here, but it's not very probable,
7859
          # and we have no other way to check)
7860
          current_mem = 0
7861
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
7862
                    pninfo.payload['memory_free'])
7863
        if miss_mem > 0:
7864
          raise errors.OpPrereqError("This change will prevent the instance"
7865
                                     " from starting, due to %d MB of memory"
7866
                                     " missing on its primary node" % miss_mem,
7867
                                     errors.ECODE_NORES)
7868

    
7869
      if be_new[constants.BE_AUTO_BALANCE]:
7870
        for node, nres in nodeinfo.items():
7871
          if node not in instance.secondary_nodes:
7872
            continue
7873
          msg = nres.fail_msg
7874
          if msg:
7875
            self.warn.append("Can't get info from secondary node %s: %s" %
7876
                             (node, msg))
7877
          elif not isinstance(nres.payload.get('memory_free', None), int):
7878
            self.warn.append("Secondary node %s didn't return free"
7879
                             " memory information" % node)
7880
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
7881
            self.warn.append("Not enough memory to failover instance to"
7882
                             " secondary node %s" % node)
7883

    
7884
    # NIC processing
7885
    self.nic_pnew = {}
7886
    self.nic_pinst = {}
7887
    for nic_op, nic_dict in self.op.nics:
7888
      if nic_op == constants.DDM_REMOVE:
7889
        if not instance.nics:
7890
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
7891
                                     errors.ECODE_INVAL)
7892
        continue
7893
      if nic_op != constants.DDM_ADD:
7894
        # an existing nic
7895
        if not instance.nics:
7896
          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
7897
                                     " no NICs" % nic_op,
7898
                                     errors.ECODE_INVAL)
7899
        if nic_op < 0 or nic_op >= len(instance.nics):
7900
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
7901
                                     " are 0 to %d" %
7902
                                     (nic_op, len(instance.nics) - 1),
7903
                                     errors.ECODE_INVAL)
7904
        old_nic_params = instance.nics[nic_op].nicparams
7905
        old_nic_ip = instance.nics[nic_op].ip
7906
      else:
7907
        old_nic_params = {}
7908
        old_nic_ip = None
7909

    
7910
      update_params_dict = dict([(key, nic_dict[key])
7911
                                 for key in constants.NICS_PARAMETERS
7912
                                 if key in nic_dict])
7913

    
7914
      if 'bridge' in nic_dict:
7915
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
7916

    
7917
      new_nic_params, new_filled_nic_params = \
7918
          self._GetUpdatedParams(old_nic_params, update_params_dict,
7919
                                 cluster.nicparams[constants.PP_DEFAULT],
7920
                                 constants.NICS_PARAMETER_TYPES)
7921
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
7922
      self.nic_pinst[nic_op] = new_nic_params
7923
      self.nic_pnew[nic_op] = new_filled_nic_params
7924
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
7925

    
7926
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
7927
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
7928
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
7929
        if msg:
7930
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
7931
          if self.force:
7932
            self.warn.append(msg)
7933
          else:
7934
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
7935
      if new_nic_mode == constants.NIC_MODE_ROUTED:
7936
        if 'ip' in nic_dict:
7937
          nic_ip = nic_dict['ip']
7938
        else:
7939
          nic_ip = old_nic_ip
7940
        if nic_ip is None:
7941
          raise errors.OpPrereqError('Cannot set the nic ip to None'
7942
                                     ' on a routed nic', errors.ECODE_INVAL)
7943
      if 'mac' in nic_dict:
7944
        nic_mac = nic_dict['mac']
7945
        if nic_mac is None:
7946
          raise errors.OpPrereqError('Cannot set the nic mac to None',
7947
                                     errors.ECODE_INVAL)
7948
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7949
          # otherwise generate the mac
7950
          nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
7951
        else:
7952
          # or validate/reserve the current one
7953
          try:
7954
            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
7955
          except errors.ReservationError:
7956
            raise errors.OpPrereqError("MAC address %s already in use"
7957
                                       " in cluster" % nic_mac,
7958
                                       errors.ECODE_NOTUNIQUE)
7959

    
7960
    # DISK processing
7961
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
7962
      raise errors.OpPrereqError("Disk operations not supported for"
7963
                                 " diskless instances",
7964
                                 errors.ECODE_INVAL)
7965
    for disk_op, _ in self.op.disks:
7966
      if disk_op == constants.DDM_REMOVE:
7967
        if len(instance.disks) == 1:
7968
          raise errors.OpPrereqError("Cannot remove the last disk of"
7969
                                     " an instance",
7970
                                     errors.ECODE_INVAL)
7971
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
7972
        ins_l = ins_l[pnode]
7973
        msg = ins_l.fail_msg
7974
        if msg:
7975
          raise errors.OpPrereqError("Can't contact node %s: %s" %
7976
                                     (pnode, msg), errors.ECODE_ENVIRON)
7977
        if instance.name in ins_l.payload:
7978
          raise errors.OpPrereqError("Instance is running, can't remove"
7979
                                     " disks.", errors.ECODE_STATE)
7980

    
7981
      if (disk_op == constants.DDM_ADD and
7982
          len(instance.nics) >= constants.MAX_DISKS):
7983
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
7984
                                   " add more" % constants.MAX_DISKS,
7985
                                   errors.ECODE_STATE)
7986
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
7987
        # an existing disk
7988
        if disk_op < 0 or disk_op >= len(instance.disks):
7989
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
7990
                                     " are 0 to %d" %
7991
                                     (disk_op, len(instance.disks)),
7992
                                     errors.ECODE_INVAL)
7993

    
7994
    return
7995

    
7996
  def Exec(self, feedback_fn):
7997
    """Modifies an instance.
7998

7999
    All parameters take effect only at the next restart of the instance.
8000

8001
    """
8002
    # Process here the warnings from CheckPrereq, as we don't have a
8003
    # feedback_fn there.
8004
    for warn in self.warn:
8005
      feedback_fn("WARNING: %s" % warn)
8006

    
8007
    result = []
8008
    instance = self.instance
8009
    # disk changes
8010
    for disk_op, disk_dict in self.op.disks:
8011
      if disk_op == constants.DDM_REMOVE:
8012
        # remove the last disk
8013
        device = instance.disks.pop()
8014
        device_idx = len(instance.disks)
8015
        for node, disk in device.ComputeNodeTree(instance.primary_node):
8016
          self.cfg.SetDiskID(disk, node)
8017
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
8018
          if msg:
8019
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
8020
                            " continuing anyway", device_idx, node, msg)
8021
        result.append(("disk/%d" % device_idx, "remove"))
8022
      elif disk_op == constants.DDM_ADD:
8023
        # add a new disk
8024
        if instance.disk_template == constants.DT_FILE:
8025
          file_driver, file_path = instance.disks[0].logical_id
8026
          file_path = os.path.dirname(file_path)
8027
        else:
8028
          file_driver = file_path = None
8029
        disk_idx_base = len(instance.disks)
8030
        new_disk = _GenerateDiskTemplate(self,
8031
                                         instance.disk_template,
8032
                                         instance.name, instance.primary_node,
8033
                                         instance.secondary_nodes,
8034
                                         [disk_dict],
8035
                                         file_path,
8036
                                         file_driver,
8037
                                         disk_idx_base)[0]
8038
        instance.disks.append(new_disk)
8039
        info = _GetInstanceInfoText(instance)
8040

    
8041
        logging.info("Creating volume %s for instance %s",
8042
                     new_disk.iv_name, instance.name)
8043
        # Note: this needs to be kept in sync with _CreateDisks
8044
        #HARDCODE
8045
        for node in instance.all_nodes:
8046
          f_create = node == instance.primary_node
8047
          try:
8048
            _CreateBlockDev(self, node, instance, new_disk,
8049
                            f_create, info, f_create)
8050
          except errors.OpExecError, err:
8051
            self.LogWarning("Failed to create volume %s (%s) on"
8052
                            " node %s: %s",
8053
                            new_disk.iv_name, new_disk, node, err)
8054
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
8055
                       (new_disk.size, new_disk.mode)))
8056
      else:
8057
        # change a given disk
8058
        instance.disks[disk_op].mode = disk_dict['mode']
8059
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
8060
    # NIC changes
8061
    for nic_op, nic_dict in self.op.nics:
8062
      if nic_op == constants.DDM_REMOVE:
8063
        # remove the last nic
8064
        del instance.nics[-1]
8065
        result.append(("nic.%d" % len(instance.nics), "remove"))
8066
      elif nic_op == constants.DDM_ADD:
8067
        # mac and bridge should be set, by now
8068
        mac = nic_dict['mac']
8069
        ip = nic_dict.get('ip', None)
8070
        nicparams = self.nic_pinst[constants.DDM_ADD]
8071
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
8072
        instance.nics.append(new_nic)
8073
        result.append(("nic.%d" % (len(instance.nics) - 1),
8074
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
8075
                       (new_nic.mac, new_nic.ip,
8076
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
8077
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
8078
                       )))
8079
      else:
8080
        for key in 'mac', 'ip':
8081
          if key in nic_dict:
8082
            setattr(instance.nics[nic_op], key, nic_dict[key])
8083
        if nic_op in self.nic_pinst:
8084
          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
8085
        for key, val in nic_dict.iteritems():
8086
          result.append(("nic.%s/%d" % (key, nic_op), val))
8087

    
8088
    # hvparams changes
8089
    if self.op.hvparams:
8090
      instance.hvparams = self.hv_inst
8091
      for key, val in self.op.hvparams.iteritems():
8092
        result.append(("hv/%s" % key, val))
8093

    
8094
    # beparams changes
8095
    if self.op.beparams:
8096
      instance.beparams = self.be_inst
8097
      for key, val in self.op.beparams.iteritems():
8098
        result.append(("be/%s" % key, val))
8099

    
8100
    self.cfg.Update(instance, feedback_fn)
8101

    
8102
    return result
8103

    
8104

    
8105
class LUQueryExports(NoHooksLU):
8106
  """Query the exports list
8107

8108
  """
8109
  _OP_REQP = ['nodes']
8110
  REQ_BGL = False
8111

    
8112
  def ExpandNames(self):
8113
    self.needed_locks = {}
8114
    self.share_locks[locking.LEVEL_NODE] = 1
8115
    if not self.op.nodes:
8116
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8117
    else:
8118
      self.needed_locks[locking.LEVEL_NODE] = \
8119
        _GetWantedNodes(self, self.op.nodes)
8120

    
8121
  def CheckPrereq(self):
8122
    """Check prerequisites.
8123

8124
    """
8125
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
8126

    
8127
  def Exec(self, feedback_fn):
8128
    """Compute the list of all the exported system images.
8129

8130
    @rtype: dict
8131
    @return: a dictionary with the structure node->(export-list)
8132
        where export-list is a list of the instances exported on
8133
        that node.
8134

8135
    """
8136
    rpcresult = self.rpc.call_export_list(self.nodes)
8137
    result = {}
8138
    for node in rpcresult:
8139
      if rpcresult[node].fail_msg:
8140
        result[node] = False
8141
      else:
8142
        result[node] = rpcresult[node].payload
8143

    
8144
    return result
8145

    
8146

    
8147
class LUExportInstance(LogicalUnit):
8148
  """Export an instance to an image in the cluster.
8149

8150
  """
8151
  HPATH = "instance-export"
8152
  HTYPE = constants.HTYPE_INSTANCE
8153
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
8154
  REQ_BGL = False
8155

    
8156
  def CheckArguments(self):
8157
    """Check the arguments.
8158

8159
    """
8160
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
8161
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
8162

    
8163
  def ExpandNames(self):
8164
    self._ExpandAndLockInstance()
8165
    # FIXME: lock only instance primary and destination node
8166
    #
8167
    # Sad but true, for now we have do lock all nodes, as we don't know where
8168
    # the previous export might be, and and in this LU we search for it and
8169
    # remove it from its current node. In the future we could fix this by:
8170
    #  - making a tasklet to search (share-lock all), then create the new one,
8171
    #    then one to remove, after
8172
    #  - removing the removal operation altogether
8173
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8174

    
8175
  def DeclareLocks(self, level):
8176
    """Last minute lock declaration."""
8177
    # All nodes are locked anyway, so nothing to do here.
8178

    
8179
  def BuildHooksEnv(self):
8180
    """Build hooks env.
8181

8182
    This will run on the master, primary node and target node.
8183

8184
    """
8185
    env = {
8186
      "EXPORT_NODE": self.op.target_node,
8187
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
8188
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
8189
      }
8190
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
8191
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
8192
          self.op.target_node]
8193
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
    self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
    assert self.dst_node is not None

    _CheckNodeOnline(self, self.dst_node.name)
    _CheckNodeNotDrained(self, self.dst_node.name)

    # instance disk type verification
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks", errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node

    if self.op.shutdown:
      # shut down the instance, but not the disks
      feedback_fn("Shutting down instance %s" % instance.name)
      result = self.rpc.call_instance_shutdown(src_node, instance,
                                               self.shutdown_timeout)
      result.Raise("Could not shut down instance %s on"
                   " node %s" % (instance.name, src_node))

    vgname = self.cfg.GetVGName()

    snap_disks = []

    # set the disks ID correctly since call_instance_start needs the
    # correct drbd minor to create the symlinks
    for disk in instance.disks:
      self.cfg.SetDiskID(disk, src_node)

    activate_disks = (not instance.admin_up)

    if activate_disks:
      # Activate the instance disks if we're exporting a stopped instance
      feedback_fn("Activating disks for %s" % instance.name)
      _StartInstanceDisks(self, instance, None)

    try:
      # per-disk results
      dresults = []
      try:
        for idx, disk in enumerate(instance.disks):
          feedback_fn("Creating a snapshot of disk/%s on node %s" %
                      (idx, src_node))

          # result.payload will be a snapshot of an lvm leaf of the one we
          # passed
          result = self.rpc.call_blockdev_snapshot(src_node, disk)
          msg = result.fail_msg
          if msg:
            self.LogWarning("Could not snapshot disk/%s on node %s: %s",
                            idx, src_node, msg)
            snap_disks.append(False)
          else:
            disk_id = (vgname, result.payload)
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                   logical_id=disk_id, physical_id=disk_id,
                                   iv_name=disk.iv_name)
            snap_disks.append(new_dev)

      finally:
        if self.op.shutdown and instance.admin_up:
          feedback_fn("Starting instance %s" % instance.name)
          result = self.rpc.call_instance_start(src_node, instance, None, None)
          msg = result.fail_msg
          if msg:
            _ShutdownInstanceDisks(self, instance)
            raise errors.OpExecError("Could not start instance: %s" % msg)

      # TODO: check for size

      cluster_name = self.cfg.GetClusterName()
      for idx, dev in enumerate(snap_disks):
        feedback_fn("Exporting snapshot %s from %s to %s" %
                    (idx, src_node, dst_node.name))
        if dev:
          # FIXME: pass debug from opcode to backend
          result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                                 instance, cluster_name,
                                                 idx, self.op.debug_level)
          msg = result.fail_msg
          if msg:
            self.LogWarning("Could not export disk/%s from node %s to"
                            " node %s: %s", idx, src_node, dst_node.name, msg)
            dresults.append(False)
          else:
            dresults.append(True)
          msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
          if msg:
            self.LogWarning("Could not remove snapshot for disk/%d from node"
                            " %s: %s", idx, src_node, msg)
        else:
          dresults.append(False)

      feedback_fn("Finalizing export on %s" % dst_node.name)
      result = self.rpc.call_finalize_export(dst_node.name, instance,
                                             snap_disks)
      fin_resu = True
      msg = result.fail_msg
      if msg:
        self.LogWarning("Could not finalize export for instance %s"
                        " on node %s: %s", instance.name, dst_node.name, msg)
        fin_resu = False

    finally:
      if activate_disks:
        feedback_fn("Deactivating disks for %s" % instance.name)
        _ShutdownInstanceDisks(self, instance)

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    iname = instance.name
    if nodelist:
      feedback_fn("Removing old exports for instance %s" % iname)
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].fail_msg:
          continue
        if iname in exportlist[node].payload:
          msg = self.rpc.call_export_remove(node, iname).fail_msg
          if msg:
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s: %s", iname, node, msg)
    return fin_resu, dresults
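
  # The return value of Exec above is a 2-tuple: fin_resu is the boolean
  # outcome of call_finalize_export on the target node, and dresults holds
  # one boolean per instance disk (True if that disk's snapshot was exported
  # successfully). A hypothetical two-disk instance whose second disk failed
  # to export would therefore return (True, [True, False]).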


class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = False
    if not instance_name:
      fqdn_warn = True
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      msg = exportlist[node].fail_msg
      if msg:
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
        continue
      if instance_name in exportlist[node].payload:
        found = True
        result = self.rpc.call_export_remove(node, instance_name)
        msg = result.fail_msg
        if msg:
          logging.error("Could not remove export for instance %s"
                        " on node %s: %s", instance_name, node, msg)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")


class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    self.needed_locks = {}
    if self.op.kind == constants.TAG_NODE:
      self.op.name = _ExpandNodeName(self.cfg, self.op.name)
      self.needed_locks[locking.LEVEL_NODE] = self.op.name
    elif self.op.kind == constants.TAG_INSTANCE:
      self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name

  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif self.op.kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif self.op.kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind), errors.ECODE_INVAL)


class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    return list(self.target.GetTags())


class LUSearchTags(NoHooksLU):
  """Searches the tags for a given pattern.

  """
  _OP_REQP = ["pattern"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the pattern passed for validity by compiling it.

    """
    try:
      self.re = re.compile(self.op.pattern)
    except re.error, err:
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
                                 (self.op.pattern, err), errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Returns the list of matching (path, tag) pairs.

    """
    cfg = self.cfg
    tgts = [("/cluster", cfg.GetClusterInfo())]
    ilist = cfg.GetAllInstancesInfo().values()
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
    nlist = cfg.GetAllNodesInfo().values()
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
    results = []
    for path, target in tgts:
      for tag in target.GetTags():
        if self.re.search(tag):
          results.append((path, tag))
    return results
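
  # Example of the structure returned above (tag values and object names are
  # hypothetical): searching for the pattern "^db" might return
  #   [("/cluster", "dbfarm"), ("/instances/inst1.example.com", "dbserver")]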
8481

    
8482

    
8483
class LUAddTags(TagsLU):
8484
  """Sets a tag on a given object.
8485

8486
  """
8487
  _OP_REQP = ["kind", "name", "tags"]
8488
  REQ_BGL = False
8489

    
8490
  def CheckPrereq(self):
8491
    """Check prerequisites.
8492

8493
    This checks the type and length of the tag name and value.
8494

8495
    """
8496
    TagsLU.CheckPrereq(self)
8497
    for tag in self.op.tags:
8498
      objects.TaggableObject.ValidateTag(tag)
8499

    
8500
  def Exec(self, feedback_fn):
8501
    """Sets the tag.
8502

8503
    """
8504
    try:
8505
      for tag in self.op.tags:
8506
        self.target.AddTag(tag)
8507
    except errors.TagError, err:
8508
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
8509
    self.cfg.Update(self.target, feedback_fn)
8510

    
8511

    
8512
class LUDelTags(TagsLU):
8513
  """Delete a list of tags from a given object.
8514

8515
  """
8516
  _OP_REQP = ["kind", "name", "tags"]
8517
  REQ_BGL = False
8518

    
8519
  def CheckPrereq(self):
8520
    """Check prerequisites.
8521

8522
    This checks that we have the given tag.
8523

8524
    """
8525
    TagsLU.CheckPrereq(self)
8526
    for tag in self.op.tags:
8527
      objects.TaggableObject.ValidateTag(tag)
8528
    del_tags = frozenset(self.op.tags)
8529
    cur_tags = self.target.GetTags()
8530
    if not del_tags <= cur_tags:
8531
      diff_tags = del_tags - cur_tags
8532
      diff_names = ["'%s'" % tag for tag in diff_tags]
8533
      diff_names.sort()
8534
      raise errors.OpPrereqError("Tag(s) %s not found" %
8535
                                 (",".join(diff_names)), errors.ECODE_NOENT)
8536

    
8537
  def Exec(self, feedback_fn):
8538
    """Remove the tag from the object.
8539

8540
    """
8541
    for tag in self.op.tags:
8542
      self.target.RemoveTag(tag)
8543
    self.cfg.Update(self.target, feedback_fn)
8544

    
8545

    
8546
class LUTestDelay(NoHooksLU):
8547
  """Sleep for a specified amount of time.
8548

8549
  This LU sleeps on the master and/or nodes for a specified amount of
8550
  time.
8551

8552
  """
8553
  _OP_REQP = ["duration", "on_master", "on_nodes"]
8554
  REQ_BGL = False
8555

    
8556
  def ExpandNames(self):
8557
    """Expand names and set required locks.
8558

8559
    This expands the node list, if any.
8560

8561
    """
8562
    self.needed_locks = {}
8563
    if self.op.on_nodes:
8564
      # _GetWantedNodes can be used here, but is not always appropriate to use
8565
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
8566
      # more information.
8567
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
8568
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
8569

    
8570
  def CheckPrereq(self):
8571
    """Check prerequisites.
8572

8573
    """
8574

    
8575
  def Exec(self, feedback_fn):
8576
    """Do the actual sleep.
8577

8578
    """
8579
    if self.op.on_master:
8580
      if not utils.TestDelay(self.op.duration):
8581
        raise errors.OpExecError("Error during master delay test")
8582
    if self.op.on_nodes:
8583
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
8584
      for node, node_result in result.items():
8585
        node_result.Raise("Failure during rpc call to node %s" % node)


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has four sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the mode's _*_KEYS class attribute are
      required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, result) for
      easy usage

  """
  # pylint: disable-msg=R0902
  # lots of instance attributes
  _ALLO_KEYS = [
    "name", "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  _RELO_KEYS = [
    "name", "relocate_from",
    ]
  _EVAC_KEYS = [
    "evac_nodes",
    ]

  def __init__(self, cfg, rpc, mode, **kwargs):
    self.cfg = cfg
    self.rpc = rpc
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    self.name = None
    self.evac_nodes = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.result = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
      fn = self._AddNewInstance
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
      fn = self._AddRelocateInstance
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      keyset = self._EVAC_KEYS
      fn = self._AddEvacuateNodes
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])

    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData(fn)
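
  # Usage sketch, mirroring how LUTestAllocator.Exec below drives this class
  # (the instance name, sizes and allocator script name are hypothetical):
  #
  #   ial = IAllocator(self.cfg, self.rpc,
  #                    mode=constants.IALLOCATOR_MODE_ALLOC,
  #                    name="inst1.example.com",
  #                    mem_size=1024,
  #                    disks=[{"size": 10240, "mode": "w"}],
  #                    disk_template=constants.DT_DRBD8,
  #                    os="debian-image", tags=[], nics=[], vcpus=1,
  #                    hypervisor=self.cfg.GetHypervisorType())
  #   ial.Run("hail")  # name of the external allocator script
  #   if ial.success:
  #     chosen_nodes = ial.result
  #
  # Every key in the mode's _*_KEYS list must be passed as a keyword
  # argument, otherwise __init__ raises ProgrammerError.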

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      hypervisor_name = cluster_info.enabled_hypervisors[0]

    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
                                        hypervisor_name)
    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        }

      if not (ninfo.offline or ninfo.drained):
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload

        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = objects.FillDict(
            cluster_info.nicparams[constants.PP_DEFAULT],
            nic.nicparams)
        nic_dict = {"mac": nic.mac,
                    "ip": nic.ip,
                    "mode": filled_params[constants.NIC_MODE],
                    "link": filled_params[constants.NIC_LINK],
                   }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    return request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node", errors.ECODE_STATE)

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    return request

  def _AddEvacuateNodes(self):
    """Add evacuate nodes data to allocator structure.

    """
    request = {
      "evac_nodes": self.evac_nodes
      }
    return request

  def _BuildInputData(self, fn):
    """Build input data structures.

    """
    self._ComputeClusterData()

    request = fn()
    request["type"] = self.mode
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)
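
  # Shape of the serialized input built above (field values are illustrative;
  # see _ComputeClusterData and the _Add* methods for the exact keys):
  #
  #   {
  #     "version": ...,
  #     "cluster_name": "cluster.example.com",
  #     "cluster_tags": [...],
  #     "enabled_hypervisors": [...],
  #     "nodes": {"node1.example.com": {"total_memory": ..., ...}, ...},
  #     "instances": {"inst1.example.com": {"memory": ..., ...}, ...},
  #     "request": {"type": <mode>, ...},  # plus the mode-specific keys
  #   }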

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and, if successful, save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["result"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'result' key"
                               " is not a list")
    self.out_data = rdict
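
  # The external script is expected to print a serialized dict such as (node
  # names are hypothetical):
  #   {"success": true,
  #    "info": "allocation successful",
  #    "result": ["node2.example.com", "node4.example.com"]}
  # Older allocators that return the list under "nodes" are still accepted
  # through the backwards-compatibility shim above.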


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the requested direction
    and mode.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr, errors.ECODE_INVAL)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname, errors.ECODE_EXISTS)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'",
                                   errors.ECODE_INVAL)
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the 'nics'"
                                     " parameter", errors.ECODE_INVAL)
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'",
                                   errors.ECODE_INVAL)
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
                                     " parameter", errors.ECODE_INVAL)
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input",
                                   errors.ECODE_INVAL)
      fname = _ExpandInstanceName(self.cfg, self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      if not hasattr(self.op, "evac_nodes"):
        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
                                   " opcode input", errors.ECODE_INVAL)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode, errors.ECODE_INVAL)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name",
                                   errors.ECODE_INVAL)
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction, errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       evac_nodes=self.op.evac_nodes)
    else:
      raise errors.ProgrammerError("Uncaught mode %s in"
                                   " LUTestAllocator.Exec", self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result