
lib/cmdlib.py @ 24a40d57


#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import socket
import time
import tempfile
import re
import platform

from ganeti import rpc
from ganeti import ssh
from ganeti import logger
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import ssconf

class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement CheckPrereq which also fills in the opcode instance
      with all the fields (even if as None)
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements (REQ_CLUSTER,
      REQ_MASTER); note that all commands require root permissions

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_CLUSTER = True
  REQ_MASTER = True

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.processor = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    if self.REQ_CLUSTER:
      if not cfg.IsCluster():
        raise errors.OpPrereqError("Cluster not initialized yet,"
                                   " use 'gnt-cluster init' first.")
      if self.REQ_MASTER:
        master = sstore.GetMasterNode()
        if master != utils.HostInfo().name:
          raise errors.OpPrereqError("Commands must be run on the master"
                                     " node %s" % master)

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form; e.g. a short node name must be fully
    expanded after this method has successfully completed (so that
    hooks, logging, etc. work correctly).

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    As for the node lists, the master should not be included in them,
    as it will be added by the hooks runner in case this LU requires a
    cluster to run on (otherwise we don't have a node list). If there
    are no nodes, an empty list (and not None) should be returned.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError
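
# Illustrative sketch (not part of the original module): a minimal LU
# subclass following the rules documented above. The opcode and the
# "example-noop" hook path are hypothetical and exist only to show the
# shape of CheckPrereq/Exec/BuildHooksEnv and the HPATH/HTYPE overrides.
#
#   class LUExampleNoop(LogicalUnit):
#     HPATH = "example-noop"
#     HTYPE = constants.HTYPE_NODE
#     _OP_REQP = ["node_name"]
#
#     def BuildHooksEnv(self):
#       env = {"OP_TARGET": self.op.node_name}
#       return env, [], [self.op.node_name]
#
#     def CheckPrereq(self):
#       # canonicalize the opcode parameters, as required by the base class
#       self.op.node_name = self.cfg.ExpandNodeName(self.op.node_name)
#       if self.op.node_name is None:
#         raise errors.OpPrereqError("Unknown node")
#
#     def Exec(self, feedback_fn):
#       feedback_fn("no-op on %s" % self.op.node_name)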


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Build hooks env.

    This is a no-op, since we don't run hooks.

    """
    return {}, [], []

def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: List of nodes (strings) or None for all

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if nodes:
    wanted = []

    for name in nodes:
      node = lu.cfg.ExpandNodeName(name)
      if node is None:
        raise errors.OpPrereqError("No such node name '%s'" % name)
      wanted.append(node)

  else:
    wanted = lu.cfg.GetNodeList()
  return utils.NiceSort(wanted)

def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = lu.cfg.GetInstanceList()
  return utils.NiceSort(wanted)

def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  Args:
    static: Static fields
    dynamic: Dynamic fields

  """
  static_fields = frozenset(static)
  dynamic_fields = frozenset(dynamic)

  all_fields = static_fields | dynamic_fields

  if not all_fields.issuperset(selected):
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(frozenset(selected).
                                          difference(all_fields)))
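
# Illustrative usage sketch (not part of the original module); the field
# names below are examples only:
#
#   _CheckOutputFields(static=["name"], dynamic=["mfree"],
#                      selected=["name", "mfree"])   # passes silently
#   _CheckOutputFields(static=["name"], dynamic=["mfree"],
#                      selected=["bogus"])           # raises OpPrereqError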


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks from single variables.

  Args:
    secondary_nodes: List of secondary nodes as strings
  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env
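
# For illustration only (not in the original file): for a hypothetical
# instance "inst1.example.com" with primary node "node1", one secondary
# "node2" and a single NIC on bridge "xen-br0", the helper above would
# return roughly:
#
#   {"OP_TARGET": "inst1.example.com",
#    "INSTANCE_NAME": "inst1.example.com",
#    "INSTANCE_PRIMARY": "node1",
#    "INSTANCE_SECONDARIES": "node2",
#    "INSTANCE_OS_TYPE": "debian-etch",
#    "INSTANCE_STATUS": "up",
#    "INSTANCE_MEMORY": 512,
#    "INSTANCE_VCPUS": 1,
#    "INSTANCE_NIC0_IP": "",
#    "INSTANCE_NIC0_BRIDGE": "xen-br0",
#    "INSTANCE_NIC_COUNT": 1}
#
# Per the LogicalUnit.BuildHooksEnv contract, the hooks runner later adds
# the "GANETI_" prefix to each key.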


def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override
  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # the original read "instance.os" here as well, which looks like a
    # copy-paste slip; INSTANCE_STATUS is meant to carry the status field
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)

def _UpdateEtcHosts(fullnode, ip):
  """Ensure a node has a correct entry in /etc/hosts.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)

  """
  node = fullnode.split(".", 1)[0]

  f = open('/etc/hosts', 'r+')

  inthere = False

  save_lines = []
  add_lines = []
  removed = False

  while True:
    rawline = f.readline()

    if not rawline:
      # End of file
      break

    line = rawline.split('\n')[0]

    # Strip off comments
    line = line.split('#')[0]

    if not line:
      # Entire line was comment, skip
      save_lines.append(rawline)
      continue

    fields = line.split()

    haveall = True
    havesome = False
    for spec in [ ip, fullnode, node ]:
      if spec not in fields:
        haveall = False
      if spec in fields:
        havesome = True

    if haveall:
      inthere = True
      save_lines.append(rawline)
      continue

    if havesome and not haveall:
      # Line (old, or manual?) which is missing some.  Remove.
      removed = True
      continue

    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s\t%s %s\n' % (ip, fullnode, node))

  if removed:
    if add_lines:
      save_lines = save_lines + add_lines

    # We removed a line, write a new file and replace old.
    fd, tmpname = tempfile.mkstemp('tmp', 'hosts_', '/etc')
    newfile = os.fdopen(fd, 'w')
    newfile.write(''.join(save_lines))
    newfile.close()
    os.rename(tmpname, '/etc/hosts')

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()

def _UpdateKnownHosts(fullnode, ip, pubkey):
  """Ensure a node has a correct known_hosts entry.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)
    pubkey   - the public key of the cluster

  """
  if os.path.exists(constants.SSH_KNOWN_HOSTS_FILE):
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'r+')
  else:
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'w+')

  inthere = False

  save_lines = []
  add_lines = []
  removed = False

  for rawline in f:
    logger.Debug('read %s' % (repr(rawline),))

    parts = rawline.rstrip('\r\n').split()

    # Ignore unwanted lines
    if len(parts) >= 3 and not rawline.lstrip()[0] == '#':
      fields = parts[0].split(',')
      key = parts[2]

      haveall = True
      havesome = False
      for spec in [ ip, fullnode ]:
        if spec not in fields:
          haveall = False
        if spec in fields:
          havesome = True

      logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
      if haveall and key == pubkey:
        inthere = True
        save_lines.append(rawline)
        logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
        continue

      if havesome and (not haveall or key != pubkey):
        removed = True
        logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
        continue

    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey))
    logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),))

  if removed:
    save_lines = save_lines + add_lines

    # Write a new file and replace old.
    fd, tmpname = tempfile.mkstemp('.tmp', 'known_hosts.',
                                   constants.DATA_DIR)
    newfile = os.fdopen(fd, 'w')
    try:
      newfile.write(''.join(save_lines))
    finally:
      newfile.close()
    logger.Debug("Wrote new known_hosts.")
    os.rename(tmpname, constants.SSH_KNOWN_HOSTS_FILE)

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()

def _HasValidVG(vglist, vgname):
  """Checks if the volume group list is valid.

  A non-None return value means there's an error, and the return value
  is the error message.

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  elif vgsize < 20480:
    return ("volume group '%s' too small (20480MiB required, %dMiB found)" %
            (vgname, vgsize))
  return None

def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  Args:
    node: the name of this host as a fqdn

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  f = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, f.read(8192))
  finally:
    f.close()

def _InitGanetiServerSetup(ss):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  """
  # Create pseudo random password
  randpass = sha.new(os.urandom(64)).hexdigest()
  # and write it into sstore
  ss.SetKey(ss.SS_NODED_PASS, randpass)

  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  os.chmod(constants.SSL_CERT_FILE, 0400)

  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

def _CheckInstanceBridgesExist(instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  if not rpc.call_bridges_exist(instance.primary_node, brlist):
    raise errors.OpPrereqError("one or more target bridges %s do not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))

class LUInitCluster(LogicalUnit):
  """Initialise the cluster.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
              "def_bridge", "master_netdev"]
  REQ_CLUSTER = False

  def BuildHooksEnv(self):
    """Build hooks env.

    Notes: Since we don't require a cluster, we must manually add
    ourselves in the post-run node list.

    """
    env = {"OP_TARGET": self.op.cluster_name}
    return env, [], [self.hostname.name]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    if config.ConfigWriter.IsCluster():
      raise errors.OpPrereqError("Cluster is already initialised")

    self.hostname = hostname = utils.HostInfo()

    if hostname.ip.startswith("127."):
      raise errors.OpPrereqError("This host's IP resolves to the private"
                                 " range (%s). Please fix DNS or /etc/hosts." %
                                 (hostname.ip,))

    self.clustername = clustername = utils.HostInfo(self.op.cluster_name)

    if not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, hostname.ip,
                         constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                                 " to %s,\nbut this ip address does not"
                                 " belong to this host."
                                 " Aborting." % hostname.ip)

    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip and not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    if (secondary_ip and
        secondary_ip != hostname.ip and
        (not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, secondary_ip,
                           constants.DEFAULT_NODED_PORT))):
      raise errors.OpPrereqError("You gave %s as secondary IP,\n"
                                 "but it does not belong to this host." %
                                 secondary_ip)
    self.secondary_ip = secondary_ip

    # checks presence of the volume group given
    vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)

    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus)

    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                    self.op.mac_prefix):
      raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
                                 self.op.mac_prefix)

    if self.op.hypervisor_type not in hypervisor.VALID_HTYPES:
      raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
                                 self.op.hypervisor_type)

    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (self.op.master_netdev,
                                  result.output.strip()))

  def Exec(self, feedback_fn):
    """Initialize the cluster.

    """
    clustername = self.clustername
    hostname = self.hostname

    # set up the simple store
    self.sstore = ss = ssconf.SimpleStore()
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
    ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
    ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
    ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)

    # set up the inter-node password and certificate
    _InitGanetiServerSetup(ss)

    # start the master ip
    rpc.call_node_start_master(hostname.name)

    # set up ssh config and /etc/hosts
    f = open(constants.SSH_HOST_RSA_PUB, 'r')
    try:
      sshline = f.read()
    finally:
      f.close()
    sshkey = sshline.split(" ")[1]

    _UpdateEtcHosts(hostname.name, hostname.ip)

    _UpdateKnownHosts(hostname.name, hostname.ip, sshkey)

    _InitSSHSetup(hostname.name)

    # init of cluster config file
    self.cfg = cfgw = config.ConfigWriter()
    cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip,
                    sshkey, self.op.mac_prefix,
                    self.op.vg_name, self.op.def_bridge)

class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    rpc.call_node_leave_cluster(self.sstore.GetMasterNode())

class LUVerifyCluster(NoHooksLU):
  """Verifies the cluster status.

  """
  _OP_REQP = []

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn(" - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    instancelist = self.cfg.GetInstanceList()
    if not instance in instancelist:
      feedback_fn("  - ERROR: instance %s not in instance list %s" %
                      (instance, instancelist))
      bad = True

    instanceconfig = self.cfg.GetInstanceInfo(instance)
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if not instanceconfig.status == 'down':
      if not instance in node_instance[node_current]:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    self.cfg.VerifyConfig()

    master = self.sstore.GetMasterNode()
    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    node_volume = {}
    node_instance = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if type(volumeinfo) != dict:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      result = self._VerifyInstance(instance, node_volume, node_instance,
                                    feedback_fn)
      bad = bad or result

      inst_config = self.cfg.GetInstanceInfo(instance)

      inst_config.MapLVsByNode(node_vol_should)

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    return int(bad)

class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      # the original read "self.op.sstore", which looks like a slip: the
      # simple store lives on the LU, not on the opcode
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.sstore.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.sstore.GetClusterName()
    old_ip = self.sstore.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      result = utils.RunCmd(["fping", "-q", new_ip])
      if not result.failed:
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip
    ss = self.sstore

    # shutdown the master IP
    master = ss.GetMasterNode()
    if not rpc.call_node_stop_master(master):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      if not rpc.call_node_start_master(master):
        logger.Error("Could not re-enable the master role on the master,\n"
                     "please restart manually.")

def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    logger.ToStdout("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    cfgw.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      logger.ToStderr("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    retries = 0
    for i in range(len(rstats)):
      mstat = rstats[i]
      if mstat is None:
        logger.ToStderr("Can't compute data for node %s/%s" %
                        (node, instance.disks[i].iv_name))
        continue
      perc_done, est_time, is_degraded = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        logger.ToStdout("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    if unlock:
      utils.Unlock('cmd')
    try:
      time.sleep(min(60, max_time))
    finally:
      if unlock:
        utils.Lock('cmd')

  if done:
    logger.ToStdout("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
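
# Illustrative note (not part of the original file): each entry returned
# by call_blockdev_getmirrorstatus above is unpacked as a
# (perc_done, est_time, is_degraded) tuple; for a hypothetical device
# "sda" a value of (78.5, 120, True) would be reported as
# "- device sda: 78.50% done, 120 estimated seconds remaining".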


def _CheckDiskConsistency(cfgw, dev, node, on_primary):
  """Check that mirrors are not degraded.

  """
  cfgw.SetDiskID(dev, node)

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = rpc.call_blockdev_find(node, dev)
    if not rstats:
      logger.ToStderr("Can't get any data from node %s" % node)
      result = False
    else:
      result = result and (not rstats[5])
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)

  return result

class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This always succeeds, since this is a pure query LU.

    """
    return

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.cfg.GetNodeList()
    node_data = rpc.call_os_diagnose(node_list)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    return node_data

class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would not allow itself to run.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.sstore.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    rpc.call_node_leave_cluster(node.name)

    ssh.SSHCall(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)

    logger.Info("Removing node %s from config" % node.name)

    self.cfg.RemoveNode(node.name)

class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.dynamic_fields = frozenset(["dtotal", "dfree",
                                     "mtotal", "mnode", "mfree",
                                     "bootid"])

    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
                               "pinst_list", "sinst_list",
                               "pip", "sip"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.wanted = _GetWantedNodes(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.wanted
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]

    # begin data gathering

    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if nodeinfo:
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            "bootid": nodeinfo['bootid'],
            }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field in self.dynamic_fields:
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output

class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      if node not in volumes or not volumes[node]:
        continue

      node_vols = volumes[node][:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output

class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip
    node_list = cfg.GetNodeList()
    if node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration"
                                 % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachability
    if not utils.TcpPing(utils.HostInfo().name,
                         primary_ip,
                         constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(myself.secondary_ip,
                           secondary_ip,
                           constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError(
          "Node secondary ip not reachable by TCP based ping to noded port")

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # set up inter-node password and certificate and restart the node daemon
    gntpass = self.sstore.GetNodeDaemonPassword()
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
      raise errors.OpExecError("ganeti password corruption detected")
    f = open(constants.SSL_CERT_FILE)
    try:
      gntpem = f.read(8192)
    finally:
      f.close()
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
    # so we use this to detect an invalid certificate; as long as the
    # cert doesn't contain this, the here-document will be correctly
    # parsed by the shell sequence below
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
    if not gntpem.endswith("\n"):
      raise errors.OpExecError("PEM must end with newline")
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)

    # and then connect with ssh to set password and start ganeti-noded
    # note that all the below variables are sanitized at this point,
    # either by being constants or by the checks above
    ss = self.sstore
    mycommand = ("umask 077 && "
                 "echo '%s' > '%s' && "
                 "cat > '%s' << '!EOF.' && \n"
                 "%s!EOF.\n%s restart" %
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
                  constants.SSL_CERT_FILE, gntpem,
                  constants.NODE_INITD_SCRIPT))

    result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True)
    if result.failed:
      raise errors.OpExecError("Remote command on node %s, error: %s,"
                               " output: %s" %
                               (node, result.fail_reason, result.output))

    # check connectivity
    time.sleep(4)

    result = rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
                               keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    _UpdateEtcHosts(new_node.name, new_node.primary_ip)
    _UpdateKnownHosts(new_node.name, new_node.primary_ip,
                      self.cfg.GetHostKey())

    if new_node.secondary_ip != new_node.primary_ip:
      if not rpc.call_node_tcp_ping(new_node.name,
                                    constants.LOCALHOST_IP_ADDRESS,
                                    new_node.secondary_ip,
                                    constants.DEFAULT_NODED_PORT,
                                    10, False):
        raise errors.OpExecError("Node claims it doesn't have the"
                                 " secondary ip you gave (%s).\n"
                                 "Please fix and re-run this command." %
                                 new_node.secondary_ip)

    success, msg = ssh.VerifyNodeHostname(node)
    if not success:
      raise errors.OpExecError("Node '%s' claims it has a different hostname"
                               " than the one the resolver gives: %s.\n"
                               "Please fix and re-run this command." %
                               (node, msg))

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList() + [node]
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in ("/etc/hosts", constants.SSH_KNOWN_HOSTS_FILE):
      result = rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    to_copy = ss.GetFileList()
    for fname in to_copy:
      if not ssh.CopyFileToNode(node, fname):
        logger.Error("could not copy file %s to node %s" % (fname, node))

    logger.Info("adding node %s to cluster.conf" % node)
    self.cfg.AddNode(new_node)

1585
class LUMasterFailover(LogicalUnit):
  """Failover the master node to the current node.

  This is a special LU in that it must run on a non-master node.

  """
  HPATH = "master-failover"
  HTYPE = constants.HTYPE_CLUSTER
  REQ_MASTER = False
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the new master only in the pre phase, and on all
    the nodes in the post phase.

    """
    env = {
      "OP_TARGET": self.new_master,
      "NEW_MASTER": self.new_master,
      "OLD_MASTER": self.old_master,
      }
    return env, [self.new_master], self.cfg.GetNodeList()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we are not already the master.

    """
    self.new_master = utils.HostInfo().name
    self.old_master = self.sstore.GetMasterNode()

    if self.old_master == self.new_master:
      raise errors.OpPrereqError("This command must be run on the node"
                                 " where you want the new master to be.\n"
                                 "%s is already the master" %
                                 self.old_master)

  def Exec(self, feedback_fn):
    """Failover the master node.

    This command, when run on a non-master node, will cause the current
    master to cease being master, and the non-master to become new
    master.

    """
    #TODO: do not rely on gethostname returning the FQDN
    logger.Info("setting master to %s, old master: %s" %
                (self.new_master, self.old_master))

    if not rpc.call_node_stop_master(self.old_master):
      logger.Error("could not disable the master role on the old master"
                   " %s, please disable manually" % self.old_master)

    ss = self.sstore
    ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
    if not rpc.call_upload_file(self.cfg.GetNodeList(),
                                ss.KeyToFilename(ss.SS_MASTER_NODE)):
      logger.Error("could not distribute the new simple store master file"
                   " to the other nodes, please check.")

    if not rpc.call_node_start_master(self.new_master):
      logger.Error("could not start the master role on the new master"
                   " %s, please check" % self.new_master)
      feedback_fn("Error in activating the master IP on the new master,\n"
                  "please fix manually.")


class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_MASTER = False

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    result = {
      "name": self.sstore.GetClusterName(),
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "master": self.sstore.GetMasterNode(),
      "architecture": (platform.architecture()[0], platform.machine()),
      }

    return result


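# Illustrative example (editor's note, not part of the original module): the
# dictionary returned by LUQueryClusterInfo.Exec above looks roughly like
#   {"name": "cluster.example.com", "software_version": "1.2.x",
#    "protocol_version": 11, "config_version": 3, "os_api_version": 5,
#    "export_version": 0, "master": "node1.example.com",
#    "architecture": ("64bit", "x86_64")}
# where all values are made-up examples; the real ones come from constants.py
# and the simple store.

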
class LUClusterCopyFile(NoHooksLU):
  """Copy file to cluster.

  """
  _OP_REQP = ["nodes", "filename"]

  def CheckPrereq(self):
    """Check prerequisites.

    It should check that the named file exists and that the given list
    of nodes is valid.

    """
    if not os.path.exists(self.op.filename):
      raise errors.OpPrereqError("No such filename '%s'" % self.op.filename)

    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Copy a file from master to some nodes.

    The file given by self.op.filename is copied via ssh to all
    requested nodes (self.nodes), skipping the master node itself.

    """
    filename = self.op.filename

    myname = utils.HostInfo().name

    for node in self.nodes:
      if node == myname:
        continue
      if not ssh.CopyFileToNode(node, filename):
        logger.Error("Copy of file %s to node %s failed" % (filename, node))


class LUDumpClusterConfig(NoHooksLU):
  """Return a text-representation of the cluster-config.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Return a representation of the cluster config.

    """
    return self.cfg.DumpConfig()


class LURunClusterCommand(NoHooksLU):
  """Run a command on some nodes.

  """
  _OP_REQP = ["command", "nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    It checks that the given list of nodes is valid.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Run a command on some nodes.

    """
    data = []
    for node in self.nodes:
      result = ssh.SSHCall(node, "root", self.op.command)
      data.append((node, result.output, result.exit_code))

    return data


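# Illustrative example (editor's note, not part of the original module):
# LURunClusterCommand.Exec above returns one (node, output, exit_code) tuple
# per target node, e.g.
#   [("node1.example.com", "ok\n", 0), ("node2.example.com", "", 255)]
# with made-up values; the exit code is whatever ssh.SSHCall reported.

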
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance


  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    instance: a ganeti.objects.Instance object
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    a tuple (disks_ok, device_info); disks_ok is a boolean showing whether
    the operation succeeded, and device_info is a list of
    (host, instance_visible_name, node_visible_name) tuples with the
    mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  for inst_disk in instance.disks:
    master_result = None
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(node_disk, node)
      is_primary = node == instance.primary_node
      result = rpc.call_blockdev_assemble(node, node_disk, is_primary)
      if not result:
        logger.Error("could not prepare block device %s on node %s (is_pri"
                     "mary=%s)" % (inst_disk.iv_name, node, is_primary))
        if is_primary or not ignore_secondaries:
          disks_ok = False
      if is_primary:
        master_result = result
    device_info.append((instance.primary_node, inst_disk.iv_name,
                        master_result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info


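# Illustrative example (editor's note, not part of the original module): for a
# two-disk instance the device_info list returned above could look like
#   [("node1.example.com", "sda", <primary assemble result>),
#    ("node1.example.com", "sdb", <primary assemble result>)]
# where the third element is whatever rpc.call_blockdev_assemble returned for
# the primary node.

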
def _StartInstanceDisks(cfg, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
                                           ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(instance, cfg)
    if force is not None and not force:
      logger.Error("If the message above refers to a secondary node,"
                   " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    """
    instance = self.instance
    ins_l = rpc.call_instance_list([instance.primary_node])
    ins_l = ins_l[instance.primary_node]
    if not type(ins_l) is list:
      raise errors.OpExecError("Can't contact node '%s'" %
                               instance.primary_node)

    if self.instance.name in ins_l:
      raise errors.OpExecError("Instance is running, can't shutdown"
                               " block devices.")

    _ShutdownInstanceDisks(instance, self.cfg)


def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are
  ignored.

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(top_disk, node)
      if not rpc.call_blockdev_shutdown(node, top_disk):
        logger.Error("could not shutdown block device %s on node %s" %
                     (disk.iv_name, node))
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result


class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existence
    _CheckInstanceBridgesExist(instance)

    self.instance = instance
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    nodeinfo = rpc.call_node_info([node_current], self.cfg.GetVGName())
    if not nodeinfo:
      raise errors.OpExecError("Could not contact node %s for information" %
                               (node_current))

    freememory = nodeinfo[node_current]['memory_free']
    memory = instance.memory
    if memory > freememory:
      raise errors.OpExecError("Not enough memory to start instance"
                               " %s on node %s"
                               " needed %s MiB, available %s MiB" %
                               (instance.name, node_current, memory,
                                freememory))

    _StartInstanceDisks(self.cfg, instance, force)

    if not rpc.call_instance_start(node_current, instance, extra_args):
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance")

    self.cfg.MarkInstanceUp(instance.name)


class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existence
    _CheckInstanceBridgesExist(instance)

    self.instance = instance
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    if reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                           constants.INSTANCE_REBOOT_HARD,
                           constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      if not rpc.call_instance_reboot(node_current, instance,
                                      reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      if not rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(instance, self.cfg)
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
      if not rpc.call_instance_start(node_current, instance, extra_args):
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)


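# Editor's note (not part of the original module): LURebootInstance above
# handles the three reboot types differently -- INSTANCE_REBOOT_SOFT and
# INSTANCE_REBOOT_HARD are passed straight to the node daemon via
# rpc.call_instance_reboot, while INSTANCE_REBOOT_FULL is implemented as a
# shutdown, disk deactivation/reactivation and a fresh start.

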
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    if not rpc.call_instance_shutdown(node_current, instance):
      logger.Error("could not shutdown instance")

    self.cfg.MarkInstanceDown(instance.name)
    _ShutdownInstanceDisks(instance, self.cfg)


class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
      if not isinstance(os_obj, objects.OS):
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.AddInstance(inst)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s "
                                 "on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      _ShutdownInstanceDisks(inst, self.cfg)


2190
  """Rename an instance.
2191

2192
  """
2193
  HPATH = "instance-rename"
2194
  HTYPE = constants.HTYPE_INSTANCE
2195
  _OP_REQP = ["instance_name", "new_name"]
2196

    
2197
  def BuildHooksEnv(self):
2198
    """Build hooks env.
2199

2200
    This runs on master, primary and secondary nodes of the instance.
2201

2202
    """
2203
    env = _BuildInstanceHookEnvByObject(self.instance)
2204
    env["INSTANCE_NEW_NAME"] = self.op.new_name
2205
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2206
          list(self.instance.secondary_nodes))
2207
    return env, nl, nl
2208

    
2209
  def CheckPrereq(self):
2210
    """Check prerequisites.
2211

2212
    This checks that the instance is in the cluster and is not running.
2213

2214
    """
2215
    instance = self.cfg.GetInstanceInfo(
2216
      self.cfg.ExpandInstanceName(self.op.instance_name))
2217
    if instance is None:
2218
      raise errors.OpPrereqError("Instance '%s' not known" %
2219
                                 self.op.instance_name)
2220
    if instance.status != "down":
2221
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2222
                                 self.op.instance_name)
2223
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2224
    if remote_info:
2225
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2226
                                 (self.op.instance_name,
2227
                                  instance.primary_node))
2228
    self.instance = instance
2229

    
2230
    # new name verification
2231
    name_info = utils.HostInfo(self.op.new_name)
2232

    
2233
    self.op.new_name = new_name = name_info.name
2234
    if not getattr(self.op, "ignore_ip", False):
2235
      command = ["fping", "-q", name_info.ip]
2236
      result = utils.RunCmd(command)
2237
      if not result.failed:
2238
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
2239
                                   (name_info.ip, new_name))
2240

    
2241

    
2242
  def Exec(self, feedback_fn):
2243
    """Reinstall the instance.
2244

2245
    """
2246
    inst = self.instance
2247
    old_name = inst.name
2248

    
2249
    self.cfg.RenameInstance(inst.name, self.op.new_name)
2250

    
2251
    # re-read the instance from the configuration after rename
2252
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
2253

    
2254
    _StartInstanceDisks(self.cfg, inst, None)
2255
    try:
2256
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
2257
                                          "sda", "sdb"):
2258
        msg = ("Could run OS rename script for instance %s\n"
2259
               "on node %s\n"
2260
               "(but the instance has been renamed in Ganeti)" %
2261
               (inst.name, inst.primary_node))
2262
        logger.Error(msg)
2263
    finally:
2264
      _ShutdownInstanceDisks(inst, self.cfg)
2265

    
2266

    
2267
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node only.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = [self.sstore.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logger.Info("shutting down instance %s on node %s" %
                (instance.name, instance.primary_node))

    if not rpc.call_instance_shutdown(instance.primary_node, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance")
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))

    logger.Info("removing block devices for instance %s" % instance.name)

    if not _RemoveDisks(instance, self.cfg):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logger.Info("removing instance %s out of cluster config" % instance.name)

    self.cfg.RemoveInstance(instance.name)


class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.dynamic_fields = frozenset(["oper_state", "oper_ram"])
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
                               "admin_state", "admin_ram",
                               "disk_template", "ip", "mac", "bridge",
                               "sda_size", "sdb_size"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.wanted = _GetWantedInstances(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of instances and their attributes.

    """
    instance_names = self.wanted
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
                     in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])

    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_all_instances_info(nodes)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          bad_nodes.append(name)
        # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    output = []
    for instance in instance_list:
      iout = []
      for field in self.op.output_fields:
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = (instance.status != "down")
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "admin_ram":
          val = instance.memory
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          disk = instance.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output


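# Illustrative example (editor's note, not part of the original module): with
# output_fields = ["name", "pnode", "admin_state"], LUQueryInstances.Exec
# above returns one row per instance in field order, e.g.
#   [["web1.example.com", "node1.example.com", True],
#    ["db1.example.com", "node2.example.com", False]]
# with made-up names.

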
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    # check memory requirements on the secondary node
    target_node = secondary_nodes[0]
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
    info = nodeinfo.get(target_node, None)
    if not info:
      raise errors.OpPrereqError("Cannot get current information"
                                 " from node '%s'" % target_node)
    if instance.memory > info['memory_free']:
      raise errors.OpPrereqError("Not enough memory on target node %s."
                                 " %d MB available, %d MB required" %
                                 (target_node, info['memory_free'],
                                  instance.memory))

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s do not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    self.instance = instance

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for remote_raid1, these are md over drbd
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
        if not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* checking target node resource availability")
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())

    if not nodeinfo:
      raise errors.OpExecError("Could not contact target node %s." %
                               target_node)

    free_memory = int(nodeinfo[target_node]['memory_free'])
    memory = instance.memory
    if memory > free_memory:
      raise errors.OpExecError("Not enough memory to create instance %s on"
                               " node %s. needed %s MiB, available %s MiB" %
                               (instance.name, target_node, memory,
                                free_memory))

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not rpc.call_instance_shutdown(source_node, instance):
      if self.op.ignore_consistency:
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                     " anyway. Please make sure node %s is down" %
                     (instance.name, source_node, source_node))
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.AddInstance(instance)

    feedback_fn("* activating the instance's disks on target node")
    logger.Info("Starting instance %s on node %s" %
                (instance.name, target_node))

    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
                                             ignore_secondaries=True)
    if not disks_ok:
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Can't activate the instance's disks")

    feedback_fn("* starting the instance on the target node")
    if not rpc.call_instance_start(target_node, instance, None):
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance %s on node %s." %
                               (instance.name, target_node))


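# Editor's note (not part of the original module): the failover above is a
# cold move -- check disk consistency on the secondary, verify free memory on
# the target, shut the instance down on the current primary, flip
# primary_node in the configuration, reassemble the disks on the new primary
# and start the instance there.

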
def _CreateBlockDevOnPrimary(cfg, node, device, info):
  """Create a tree of block devices on the primary node.

  This always creates all devices.

  """
  if device.children:
    for child in device.children:
      if not _CreateBlockDevOnPrimary(cfg, node, child, info):
        return False

  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size, True, info)
  if not new_id:
    return False
  if device.physical_id is None:
    device.physical_id = new_id
  return True


def _CreateBlockDevOnSecondary(cfg, node, device, force, info):
  """Create a tree of block devices on a secondary node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  """
  if device.CreateOnSecondary():
    force = True
  if device.children:
    for child in device.children:
      if not _CreateBlockDevOnSecondary(cfg, node, child, force, info):
        return False

  if not force:
    return True
  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size, False, info)
  if not new_id:
    return False
  if device.physical_id is None:
    device.physical_id = new_id
  return True


def _GenerateUniqueNames(cfg, exts):
  """Generate a suitable set of LV names.

  This will generate one logical volume name for each of the given
  extensions, each based on a freshly generated unique id.

  """
  results = []
  for val in exts:
    new_id = cfg.GenerateUniqueID()
    results.append("%s%s" % (new_id, val))
  return results


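# Illustrative example (editor's note, not part of the original module): a
# call such as _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta"]) returns
# names like ["<id1>.sda_data", "<id2>.sda_meta"], each prefixed with a
# freshly generated unique id from the configuration.

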
def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
  """Generate a drbd device complete with its children.

  """
  port = cfg.AllocatePort()
  vgname = cfg.GetVGName()
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD7, size=size,
                          logical_id = (primary, secondary, port),
                          children = [dev_data, dev_meta])
  return drbd_dev


def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
  """Generate a drbd8 device complete with its children.

  """
  port = cfg.AllocatePort()
  vgname = cfg.GetVGName()
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                          logical_id = (primary, secondary, port),
                          children = [dev_data, dev_meta],
                          iv_name=iv_name)
  return drbd_dev

def _GenerateDiskTemplate(cfg, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz):
  """Generate the entire disk layout for a given template type.

  """
  #TODO: compute space requirements

  vgname = cfg.GetVGName()
  if template_name == "diskless":
    disks = []
  elif template_name == "plain":
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                           logical_id=(vgname, names[0]),
                           iv_name = "sda")
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                           logical_id=(vgname, names[1]),
                           iv_name = "sdb")
    disks = [sda_dev, sdb_dev]
  elif template_name == "local_raid1":
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")


    names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
                                       ".sdb_m1", ".sdb_m2"])
    sda_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                              logical_id=(vgname, names[0]))
    sda_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                              logical_id=(vgname, names[1]))
    md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sda",
                              size=disk_sz,
                              children = [sda_dev_m1, sda_dev_m2])
    sdb_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                              logical_id=(vgname, names[2]))
    sdb_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                              logical_id=(vgname, names[3]))
    md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sdb",
                              size=swap_sz,
                              children = [sdb_dev_m1, sdb_dev_m2])
    disks = [md_sda_dev, md_sdb_dev]
  elif template_name == constants.DT_REMOTE_RAID1:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                         disk_sz, names[0:2])
    md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda",
                              children = [drbd_sda_dev], size=disk_sz)
    drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                         swap_sz, names[2:4])
    md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb",
                              children = [drbd_sdb_dev], size=swap_sz)
    disks = [md_sda_dev, md_sdb_dev]
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                         disk_sz, names[0:2], "sda")
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                         swap_sz, names[2:4], "sdb")
    disks = [drbd_sda_dev, drbd_sdb_dev]
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks


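# Editor's summary (not part of the original module) of the layouts built by
# _GenerateDiskTemplate above:
#   diskless     -> no disks at all
#   plain        -> one LV each for sda and sdb
#   local_raid1  -> per disk, an MD RAID1 device over two LVs
#   remote_raid1 -> per disk, an MD device on top of a DRBD7 device that
#                   mirrors an LV pair between primary and secondary
#   drbd8        -> per disk, a DRBD8 device with LV data and metadata
#                   volumes on both nodes

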
def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name


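# Illustrative example (editor's note, not part of the original module): for
# an instance named "web1.example.com" the text computed above is
# "originstname+web1.example.com".

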
def _CreateDisks(cfg, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  for device in instance.disks:
    logger.Info("creating volume %s for instance %s" %
              (device.iv_name, instance.name))
    #HARDCODE
    for secondary_node in instance.secondary_nodes:
      if not _CreateBlockDevOnSecondary(cfg, secondary_node, device, False,
                                        info):
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (device.iv_name, device, secondary_node))
        return False
    #HARDCODE
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, device, info):
      logger.Error("failed to create volume %s on primary!" %
                   device.iv_name)
      return False
  return True


def _RemoveDisks(instance, cfg):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the removal process

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  result = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(disk, node)
      if not rpc.call_blockdev_remove(node, disk):
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
                     (device.iv_name, node))
        result = False
  return result


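# Editor's note (not part of the original module): _CreateDisks and
# _RemoveDisks above are intentionally asymmetric -- creation aborts on the
# first failure so that LUCreateInstance can roll back, while removal keeps
# going and only reports an overall boolean, so a partially failed removal
# still cleans up as much as possible.

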
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
              "disk_template", "swap_size", "mode", "start", "vcpus",
              "wait_for_sync", "ip_check"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": self.op.disk_size,
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGE"] = self.src_image

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.op.mem_size,
      vcpus=self.op.vcpus,
      nics=[(self.inst_ip, self.op.bridge)],
    ))

    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl


  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)
      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")
      src_node_full = self.cfg.ExpandNodeName(src_node)
      if src_node_full is None:
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
      self.op.src_node = src_node = src_node_full

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      export_info = rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
        raise errors.OpPrereqError("Can't import instance with more than"
                                   " one data disk")

      # FIXME: are the old os-es, disk sizes, etc. useful?
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
                                                         'disk0_dump'))
      self.src_image = diskimage
    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

    # check primary node
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
    if pnode is None:
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                 self.op.pnode)
    self.op.pnode = pnode.name
    self.pnode = pnode
    self.secondaries = []
    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if getattr(self.op, "snode", None) is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")

      snode_name = self.cfg.ExpandNodeName(self.op.snode)
      if snode_name is None:
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
                                   self.op.snode)
      elif snode_name == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(snode_name)

    # Check lv size requirements
    nodenames = [pnode.name] + self.secondaries
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())

    # Required free disk space as a function of disk and swap space
    req_size_dict = {
      constants.DT_DISKLESS: 0,
      constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
      constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
      # 256 MB are added for drbd metadata, 128MB for each drbd device
      constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
      constants.DT_DRBD8: self.op.disk_size + self.op.swap_size + 256,
    }

    if self.op.disk_template not in req_size_dict:
      raise errors.ProgrammerError("Disk template '%s' size requirement"
                                   " is unknown" % self.op.disk_template)

    req_size = req_size_dict[self.op.disk_template]

    for node in nodenames:
      info = nodeinfo.get(node, None)
      if not info:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      if req_size > info['vg_free']:
        raise errors.OpPrereqError("Not enough disk space on target node %s."
                                   " %d MB available, %d MB required" %
                                   (node, info['vg_free'], req_size))

    # os verification
    os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
    if not isinstance(os_obj, objects.OS):
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node" % self.op.os_type)

    # instance verification
    hostname1 = utils.HostInfo(self.op.instance_name)

    self.op.instance_name = instance_name = hostname1.name
    instance_list = self.cfg.GetInstanceList()
    if instance_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    ip = getattr(self.op, "ip", None)
    if ip is None or ip.lower() == "none":
      inst_ip = None
    elif ip.lower() == "auto":
      inst_ip = hostname1.ip
    else:
      if not utils.IsValidIP(ip):
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                   " like a valid IP" % ip)
      inst_ip = ip
    self.inst_ip = inst_ip

    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      if utils.TcpPing(utils.HostInfo().name, hostname1.ip,
                       constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname1.ip, instance_name))

    # bridge verification
    bridge = getattr(self.op, "bridge", None)
    if bridge is None:
      self.op.bridge = self.cfg.GetDefBridge()
    else:
      self.op.bridge = bridge

    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
                                 " destination node '%s'" %
                                 (self.op.bridge, pnode.name))

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'

3001
    """Create and add the instance to the cluster.
3002

3003
    """
3004
    instance = self.op.instance_name
3005
    pnode_name = self.pnode.name
3006

    
3007
    nic = objects.NIC(bridge=self.op.bridge, mac=self.cfg.GenerateMAC())
3008
    if self.inst_ip is not None:
3009
      nic.ip = self.inst_ip
3010

    
3011
    disks = _GenerateDiskTemplate(self.cfg,
3012
                                  self.op.disk_template,
3013
                                  instance, pnode_name,
3014
                                  self.secondaries, self.op.disk_size,
3015
                                  self.op.swap_size)
3016

    
3017
    iobj = objects.Instance(name=instance, os=self.op.os_type,
3018
                            primary_node=pnode_name,
3019
                            memory=self.op.mem_size,
3020
                            vcpus=self.op.vcpus,
3021
                            nics=[nic], disks=disks,
3022
                            disk_template=self.op.disk_template,
3023
                            status=self.instance_status,
3024
                            )
3025

    
3026
    feedback_fn("* creating instance disks...")
3027
    if not _CreateDisks(self.cfg, iobj):
3028
      _RemoveDisks(iobj, self.cfg)
3029
      raise errors.OpExecError("Device creation failed, reverting...")
3030

    
3031
    feedback_fn("adding instance %s to cluster config" % instance)
3032

    
3033
    self.cfg.AddInstance(iobj)
3034

    
3035
    if self.op.wait_for_sync:
3036
      disk_abort = not _WaitForSync(self.cfg, iobj)
3037
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
3038
      # make sure the disks are not degraded (still sync-ing is ok)
3039
      time.sleep(15)
3040
      feedback_fn("* checking mirrors status")
3041
      disk_abort = not _WaitForSync(self.cfg, iobj, oneshot=True)
3042
    else:
3043
      disk_abort = False
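    # Three cases above: wait for the full sync if requested, do a single
    # degraded-or-not check (after a short grace period) for network-mirrored
    # templates, and skip the wait entirely for purely local disks.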
3044

    
3045
    if disk_abort:
3046
      _RemoveDisks(iobj, self.cfg)
3047
      self.cfg.RemoveInstance(iobj.name)
3048
      raise errors.OpExecError("There are some degraded disks for"
3049
                               " this instance")
3050

    
3051
    feedback_fn("creating os for instance %s on node %s" %
3052
                (instance, pnode_name))
3053

    
3054
    if iobj.disk_template != constants.DT_DISKLESS:
3055
      if self.op.mode == constants.INSTANCE_CREATE:
3056
        feedback_fn("* running the instance OS create scripts...")
3057
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
3058
          raise errors.OpExecError("could not add os for instance %s"
3059
                                   " on node %s" %
3060
                                   (instance, pnode_name))
3061

    
3062
      elif self.op.mode == constants.INSTANCE_IMPORT:
3063
        feedback_fn("* running the instance OS import scripts...")
3064
        src_node = self.op.src_node
3065
        src_image = self.src_image
3066
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
3067
                                                src_node, src_image):
3068
          raise errors.OpExecError("Could not import os for instance"
3069
                                   " %s on node %s" %
3070
                                   (instance, pnode_name))
3071
      else:
3072
        # also checked in the prereq part
3073
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
3074
                                     % self.op.mode)
3075

    
3076
    if self.op.start:
3077
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
3078
      feedback_fn("* starting instance...")
3079
      if not rpc.call_instance_start(pnode_name, iobj, None):
3080
        raise errors.OpExecError("Could not start instance")
3081

    
3082

    
3083
class LUConnectConsole(NoHooksLU):
3084
  """Connect to an instance's console.
3085

3086
  This is somewhat special in that it returns the command line that
3087
  you need to run on the master node in order to connect to the
3088
  console.
3089

3090
  """
3091
  _OP_REQP = ["instance_name"]
3092

    
3093
  def CheckPrereq(self):
3094
    """Check prerequisites.
3095

3096
    This checks that the instance is in the cluster.
3097

3098
    """
3099
    instance = self.cfg.GetInstanceInfo(
3100
      self.cfg.ExpandInstanceName(self.op.instance_name))
3101
    if instance is None:
3102
      raise errors.OpPrereqError("Instance '%s' not known" %
3103
                                 self.op.instance_name)
3104
    self.instance = instance
3105

    
3106
  def Exec(self, feedback_fn):
3107
    """Connect to the console of an instance
3108

3109
    """
3110
    instance = self.instance
3111
    node = instance.primary_node
3112

    
3113
    node_insts = rpc.call_instance_list([node])[node]
3114
    if node_insts is False:
3115
      raise errors.OpExecError("Can't connect to node %s." % node)
3116

    
3117
    if instance.name not in node_insts:
3118
      raise errors.OpExecError("Instance %s is not running." % instance.name)
3119

    
3120
    logger.Debug("connecting to console of %s on %s" % (instance.name, node))
3121

    
3122
    hyper = hypervisor.GetHypervisor()
3123
    console_cmd = hyper.GetShellCommandForConsole(instance.name)
3124
    # build ssh cmdline
3125
    argv = ["ssh", "-q", "-t"]
3126
    argv.extend(ssh.KNOWN_HOSTS_OPTS)
3127
    argv.extend(ssh.BATCH_MODE_OPTS)
3128
    argv.append(node)
3129
    argv.append(console_cmd)
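    # The caller gets back a (binary, argv) pair; for a Xen instance this
    # would be roughly ("ssh", ["ssh", "-q", "-t", <opts>, "node1.example.com",
    # "xm console inst1"]) -- the exact console command depends on the
    # hypervisor and the values here are illustrative only.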
3130
    return "ssh", argv
3131

    
3132

    
3133
class LUAddMDDRBDComponent(LogicalUnit):
3134
  """Adda new mirror member to an instance's disk.
3135

3136
  """
3137
  HPATH = "mirror-add"
3138
  HTYPE = constants.HTYPE_INSTANCE
3139
  _OP_REQP = ["instance_name", "remote_node", "disk_name"]
3140

    
3141
  def BuildHooksEnv(self):
3142
    """Build hooks env.
3143

3144
    This runs on the master, the primary and all the secondaries.
3145

3146
    """
3147
    env = {
3148
      "NEW_SECONDARY": self.op.remote_node,
3149
      "DISK_NAME": self.op.disk_name,
3150
      }
3151
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3152
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
3153
          self.op.remote_node,] + list(self.instance.secondary_nodes)
3154
    return env, nl, nl
3155

    
3156
  def CheckPrereq(self):
3157
    """Check prerequisites.
3158

3159
    This checks that the instance is in the cluster.
3160

3161
    """
3162
    instance = self.cfg.GetInstanceInfo(
3163
      self.cfg.ExpandInstanceName(self.op.instance_name))
3164
    if instance is None:
3165
      raise errors.OpPrereqError("Instance '%s' not known" %
3166
                                 self.op.instance_name)
3167
    self.instance = instance
3168

    
3169
    remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
3170
    if remote_node is None:
3171
      raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node)
3172
    self.remote_node = remote_node
3173

    
3174
    if remote_node == instance.primary_node:
3175
      raise errors.OpPrereqError("The specified node is the primary node of"
3176
                                 " the instance.")
3177

    
3178
    if instance.disk_template != constants.DT_REMOTE_RAID1:
3179
      raise errors.OpPrereqError("Instance's disk layout is not"
3180
                                 " remote_raid1.")
3181
    for disk in instance.disks:
3182
      if disk.iv_name == self.op.disk_name:
3183
        break
3184
    else:
3185
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
3186
                                 " instance." % self.op.disk_name)
3187
    if len(disk.children) > 1:
3188
      raise errors.OpPrereqError("The device already has two slave"
3189
                                 " devices.\n"
3190
                                 "This would create a 3-disk raid1"
3191
                                 " which we don't allow.")
3192
    self.disk = disk
3193

    
3194
  def Exec(self, feedback_fn):
3195
    """Add the mirror component
3196

3197
    """
3198
    disk = self.disk
3199
    instance = self.instance
3200

    
3201
    remote_node = self.remote_node
3202
    lv_names = [".%s_%s" % (disk.iv_name, suf) for suf in ["data", "meta"]]
3203
    names = _GenerateUniqueNames(self.cfg, lv_names)
3204
    new_drbd = _GenerateMDDRBDBranch(self.cfg, instance.primary_node,
3205
                                     remote_node, disk.size, names)
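    # For a disk named "sda" the new LVs are requested under the ".sda_data"
    # and ".sda_meta" suffixes; _GenerateUniqueNames is relied upon to turn
    # these into names that are unique within the cluster.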
3206

    
3207
    logger.Info("adding new mirror component on secondary")
3208
    #HARDCODE
3209
    if not _CreateBlockDevOnSecondary(self.cfg, remote_node, new_drbd, False,
3210
                                      _GetInstanceInfoText(instance)):
3211
      raise errors.OpExecError("Failed to create new component on secondary"
3212
                               " node %s" % remote_node)
3213

    
3214
    logger.Info("adding new mirror component on primary")
3215
    #HARDCODE
3216
    if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node, new_drbd,
3217
                                    _GetInstanceInfoText(instance)):
3218
      # remove secondary dev
3219
      self.cfg.SetDiskID(new_drbd, remote_node)
3220
      rpc.call_blockdev_remove(remote_node, new_drbd)
3221
      raise errors.OpExecError("Failed to create volume on primary")
3222

    
3223
    # the device exists now
3224
    # call the primary node to add the mirror to md
3225
    logger.Info("adding new mirror component to md")
3226
    if not rpc.call_blockdev_addchildren(instance.primary_node,
3227
                                         disk, [new_drbd]):
3228
      logger.Error("Can't add mirror compoment to md!")
3229
      self.cfg.SetDiskID(new_drbd, remote_node)
3230
      if not rpc.call_blockdev_remove(remote_node, new_drbd):
3231
        logger.Error("Can't rollback on secondary")
3232
      self.cfg.SetDiskID(new_drbd, instance.primary_node)
3233
      if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
3234
        logger.Error("Can't rollback on primary")
3235
      raise errors.OpExecError("Can't add mirror component to md array")
3236

    
3237
    disk.children.append(new_drbd)
3238

    
3239
    self.cfg.AddInstance(instance)
3240

    
3241
    _WaitForSync(self.cfg, instance)
3242

    
3243
    return 0
3244

    
3245

    
3246
class LURemoveMDDRBDComponent(LogicalUnit):
3247
  """Remove a component from a remote_raid1 disk.
3248

3249
  """
3250
  HPATH = "mirror-remove"
3251
  HTYPE = constants.HTYPE_INSTANCE
3252
  _OP_REQP = ["instance_name", "disk_name", "disk_id"]
3253

    
3254
  def BuildHooksEnv(self):
3255
    """Build hooks env.
3256

3257
    This runs on the master, the primary and all the secondaries.
3258

3259
    """
3260
    env = {
3261
      "DISK_NAME": self.op.disk_name,
3262
      "DISK_ID": self.op.disk_id,
3263
      "OLD_SECONDARY": self.old_secondary,
3264
      }
3265
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3266
    nl = [self.sstore.GetMasterNode(),
3267
          self.instance.primary_node] + list(self.instance.secondary_nodes)
3268
    return env, nl, nl
3269

    
3270
  def CheckPrereq(self):
3271
    """Check prerequisites.
3272

3273
    This checks that the instance is in the cluster.
3274

3275
    """
3276
    instance = self.cfg.GetInstanceInfo(
3277
      self.cfg.ExpandInstanceName(self.op.instance_name))
3278
    if instance is None:
3279
      raise errors.OpPrereqError("Instance '%s' not known" %
3280
                                 self.op.instance_name)
3281
    self.instance = instance
3282

    
3283
    if instance.disk_template != constants.DT_REMOTE_RAID1:
3284
      raise errors.OpPrereqError("Instance's disk layout is not"
3285
                                 " remote_raid1.")
3286
    for disk in instance.disks:
3287
      if disk.iv_name == self.op.disk_name:
3288
        break
3289
    else:
3290
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
3291
                                 " instance." % self.op.disk_name)
3292
    for child in disk.children:
3293
      if (child.dev_type == constants.LD_DRBD7 and
3294
          child.logical_id[2] == self.op.disk_id):
3295
        break
3296
    else:
3297
      raise errors.OpPrereqError("Can't find the device with this port.")
3298

    
3299
    if len(disk.children) < 2:
3300
      raise errors.OpPrereqError("Cannot remove the last component from"
3301
                                 " a mirror.")
3302
    self.disk = disk
3303
    self.child = child
3304
    if self.child.logical_id[0] == instance.primary_node:
3305
      oid = 1
3306
    else:
3307
      oid = 0
3308
    self.old_secondary = self.child.logical_id[oid]
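    # For remote_raid1 children the first two entries of logical_id are the
    # two node names, so whichever of them is not the primary node is
    # recorded as the old secondary for the hooks environment.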
3309

    
3310
  def Exec(self, feedback_fn):
3311
    """Remove the mirror component
3312

3313
    """
3314
    instance = self.instance
3315
    disk = self.disk
3316
    child = self.child
3317
    logger.Info("remove mirror component")
3318
    self.cfg.SetDiskID(disk, instance.primary_node)
3319
    if not rpc.call_blockdev_removechildren(instance.primary_node,
3320
                                            disk, [child]):
3321
      raise errors.OpExecError("Can't remove child from mirror.")
3322

    
3323
    for node in child.logical_id[:2]:
3324
      self.cfg.SetDiskID(child, node)
3325
      if not rpc.call_blockdev_remove(node, child):
3326
        logger.Error("Warning: failed to remove device from node %s,"
3327
                     " continuing operation." % node)
3328

    
3329
    disk.children.remove(child)
3330
    self.cfg.AddInstance(instance)
3331

    
3332

    
3333
class LUReplaceDisks(LogicalUnit):
3334
  """Replace the disks of an instance.
3335

3336
  """
3337
  HPATH = "mirrors-replace"
3338
  HTYPE = constants.HTYPE_INSTANCE
3339
  _OP_REQP = ["instance_name", "mode", "disks"]
3340

    
3341
  def BuildHooksEnv(self):
3342
    """Build hooks env.
3343

3344
    This runs on the master, the primary and all the secondaries.
3345

3346
    """
3347
    env = {
3348
      "MODE": self.op.mode,
3349
      "NEW_SECONDARY": self.op.remote_node,
3350
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
3351
      }
3352
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3353
    nl = [self.sstore.GetMasterNode(),
3354
          self.instance.primary_node] + list(self.instance.secondary_nodes)
3355
    return env, nl, nl
3356

    
3357
  def CheckPrereq(self):
3358
    """Check prerequisites.
3359

3360
    This checks that the instance is in the cluster.
3361

3362
    """
3363
    instance = self.cfg.GetInstanceInfo(
3364
      self.cfg.ExpandInstanceName(self.op.instance_name))
3365
    if instance is None:
3366
      raise errors.OpPrereqError("Instance '%s' not known" %
3367
                                 self.op.instance_name)
3368
    self.instance = instance
3369

    
3370
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3371
      raise errors.OpPrereqError("Instance's disk layout is not"
3372
                                 " network mirrored.")
3373

    
3374
    if len(instance.secondary_nodes) != 1:
3375
      raise errors.OpPrereqError("The instance has a strange layout,"
3376
                                 " expected one secondary but found %d" %
3377
                                 len(instance.secondary_nodes))
3378

    
3379
    self.sec_node = instance.secondary_nodes[0]
3380

    
3381
    remote_node = getattr(self.op, "remote_node", None)
3382
    if remote_node is not None:
3383
      remote_node = self.cfg.ExpandNodeName(remote_node)
3384
      if remote_node is None:
3385
        raise errors.OpPrereqError("Node '%s' not known" %
3386
                                   self.op.remote_node)
3387
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
3388
    else:
3389
      self.remote_node_info = None
3390
    if remote_node == instance.primary_node:
3391
      raise errors.OpPrereqError("The specified node is the primary node of"
3392
                                 " the instance.")
3393
    elif remote_node == self.sec_node:
3394
      # the user gave the current secondary, switch to
3395
      # 'no-replace-secondary' mode
3396
      remote_node = None
3397
    if (instance.disk_template == constants.DT_REMOTE_RAID1 and
3398
        self.op.mode != constants.REPLACE_DISK_ALL):
3399
      raise errors.OpPrereqError("Template 'remote_raid1' only allows all"
3400
                                 " disks replacement, not individual ones")
3401
    if instance.disk_template == constants.DT_DRBD8:
3402
      if self.op.mode == constants.REPLACE_DISK_ALL:
3403
        raise errors.OpPrereqError("Template 'drbd8' only allows primary or"
3404
                                   " secondary disk replacement, not"
3405
                                   " both at once")
3406
      elif self.op.mode == constants.REPLACE_DISK_PRI:
3407
        if remote_node is not None:
3408
          raise errors.OpPrereqError("Template 'drbd8' does not allow changing"
3409
                                     " the secondary while doing a primary"
3410
                                     " node disk replacement")
3411
        self.tgt_node = instance.primary_node
3412
      elif self.op.mode == constants.REPLACE_DISK_SEC:
3413
        self.new_node = remote_node # this can be None, in which case
3414
                                    # we don't change the secondary
3415
        self.tgt_node = instance.secondary_nodes[0]
3416
      else:
3417
        raise errors.ProgrammerError("Unhandled disk replace mode")
3418

    
3419
    for name in self.op.disks:
3420
      if instance.FindDisk(name) is None:
3421
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
3422
                                   (name, instance.name))
3423
    self.op.remote_node = remote_node
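    # Summary of the decisions above for the drbd8 template:
    #   - REPLACE_DISK_PRI: replace the LVs in place on the primary node
    #   - REPLACE_DISK_SEC without a new node: replace the LVs in place on
    #     the current secondary
    #   - REPLACE_DISK_SEC with a new node: move the secondary to that node
    # remote_node is normalized to None when it names the current secondary.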
3424

    
3425
  def _ExecRR1(self, feedback_fn):
3426
    """Replace the disks of an instance.
3427

3428
    """
3429
    instance = self.instance
3430
    iv_names = {}
3431
    # start of work
3432
    if self.op.remote_node is None:
3433
      remote_node = self.sec_node
3434
    else:
3435
      remote_node = self.op.remote_node
3436
    cfg = self.cfg
3437
    for dev in instance.disks:
3438
      size = dev.size
3439
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3440
      names = _GenerateUniqueNames(cfg, lv_names)
3441
      new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node,
3442
                                       remote_node, size, names)
3443
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
3444
      logger.Info("adding new mirror component on secondary for %s" %
3445
                  dev.iv_name)
3446
      #HARDCODE
3447
      if not _CreateBlockDevOnSecondary(cfg, remote_node, new_drbd, False,
3448
                                        _GetInstanceInfoText(instance)):
3449
        raise errors.OpExecError("Failed to create new component on"
3450
                                 " secondary node %s\n"
3451
                                 "Full abort, cleanup manually!" %
3452
                                 remote_node)
3453

    
3454
      logger.Info("adding new mirror component on primary")
3455
      #HARDCODE
3456
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, new_drbd,
3457
                                      _GetInstanceInfoText(instance)):
3458
        # remove secondary dev
3459
        cfg.SetDiskID(new_drbd, remote_node)
3460
        rpc.call_blockdev_remove(remote_node, new_drbd)
3461
        raise errors.OpExecError("Failed to create volume on primary!\n"
3462
                                 "Full abort, cleanup manually!!")
3463

    
3464
      # the device exists now
3465
      # call the primary node to add the mirror to md
3466
      logger.Info("adding new mirror component to md")
3467
      if not rpc.call_blockdev_addchildren(instance.primary_node, dev,
3468
                                           [new_drbd]):
3469
        logger.Error("Can't add mirror compoment to md!")
3470
        cfg.SetDiskID(new_drbd, remote_node)
3471
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
3472
          logger.Error("Can't rollback on secondary")
3473
        cfg.SetDiskID(new_drbd, instance.primary_node)
3474
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
3475
          logger.Error("Can't rollback on primary")
3476
        raise errors.OpExecError("Full abort, cleanup manually!!")
3477

    
3478
      dev.children.append(new_drbd)
3479
      cfg.AddInstance(instance)
3480

    
3481
    # this can fail as the old devices are degraded and _WaitForSync
3482
    # does a combined result over all disks, so we don't check its
3483
    # return value
3484
    _WaitForSync(cfg, instance, unlock=True)
3485

    
3486
    # so check manually all the devices
3487
    for name in iv_names:
3488
      dev, child, new_drbd = iv_names[name]
3489
      cfg.SetDiskID(dev, instance.primary_node)
3490
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
3491
      if is_degr:
3492
        raise errors.OpExecError("MD device %s is degraded!" % name)
3493
      cfg.SetDiskID(new_drbd, instance.primary_node)
3494
      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
3495
      if is_degr:
3496
        raise errors.OpExecError("New drbd device %s is degraded!" % name)
3497

    
3498
    for name in iv_names:
3499
      dev, child, new_drbd = iv_names[name]
3500
      logger.Info("remove mirror %s component" % name)
3501
      cfg.SetDiskID(dev, instance.primary_node)
3502
      if not rpc.call_blockdev_removechildren(instance.primary_node,
3503
                                              dev, [child]):
3504
        logger.Error("Can't remove child from mirror, aborting"
3505
                     " *this device cleanup*.\nYou need to cleanup manually!!")
3506
        continue
3507

    
3508
      for node in child.logical_id[:2]:
3509
        logger.Info("remove child device on %s" % node)
3510
        cfg.SetDiskID(child, node)
3511
        if not rpc.call_blockdev_remove(node, child):
3512
          logger.Error("Warning: failed to remove device from node %s,"
3513
                       " continuing operation." % node)
3514

    
3515
      dev.children.remove(child)
3516

    
3517
      cfg.AddInstance(instance)
3518

    
3519
  def _ExecD8DiskOnly(self, feedback_fn):
3520
    """Replace a disk on the primary or secondary for dbrd8.
3521

3522
    The algorithm for replace is quite complicated:
3523
      - for each disk to be replaced:
3524
        - create new LVs on the target node with unique names
3525
        - detach old LVs from the drbd device
3526
        - rename old LVs to name_replaced.<time_t>
3527
        - rename new LVs to old LVs
3528
        - attach the new LVs (with the old names now) to the drbd device
3529
      - wait for sync across all devices
3530
      - for each modified disk:
3531
        - remove old LVs (which have the name name_replaces.<time_t>)
3532

3533
    Failures are not very well handled.
3534
    """
3535
    instance = self.instance
3536
    iv_names = {}
3537
    vgname = self.cfg.GetVGName()
3538
    # start of work
3539
    cfg = self.cfg
3540
    tgt_node = self.tgt_node
3541
    for dev in instance.disks:
3542
      if not dev.iv_name in self.op.disks:
3543
        continue
3544
      size = dev.size
3545
      cfg.SetDiskID(dev, tgt_node)
3546
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3547
      names = _GenerateUniqueNames(cfg, lv_names)
3548
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3549
                             logical_id=(vgname, names[0]))
3550
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3551
                             logical_id=(vgname, names[1]))
3552
      new_lvs = [lv_data, lv_meta]
3553
      old_lvs = dev.children
3554
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
3555
      logger.Info("adding new local storage on %s for %s" %
3556
                  (tgt_node, dev.iv_name))
3557
      # since we *always* want to create this LV, we use the
3558
      # _Create...OnPrimary (which forces the creation), even if we
3559
      # are talking about the secondary node
3560
      for new_lv in new_lvs:
3561
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, new_lv,
3562
                                        _GetInstanceInfoText(instance)):
3563
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3564
                                   " node '%s'" %
3565
                                   (new_lv.logical_id[1], tgt_node))
3566

    
3567
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
3568
        raise errors.OpExecError("Can't detach drbd from local storage on node"
3569
                                 " %s for device %s" % (tgt_node, dev.iv_name))
3570
      dev.children = []
3571
      cfg.Update(instance)
3572

    
3573
      # ok, we created the new LVs, so now we know we have the needed
3574
      # storage; as such, we proceed on the target node to rename
3575
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
3576
      # using the assumption than logical_id == physical_id (which in
3577
      # turn is the unique_id on that node)
3578
      temp_suffix = int(time.time())
3579
      logger.Info("renaming the old LVs on the target node")
3580
      ren_fn = lambda d, suff: (d.physical_id[0],
3581
                                d.physical_id[1] + "_replaced-%s" % suff)
3582
      rlist = [(disk, ren_fn(disk, temp_suffix)) for disk in old_lvs]
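      # rlist pairs each old LV with its new name, e.g. an LV called
      # "xyz.sda_data" would become "xyz.sda_data_replaced-1199145600"
      # (LV name and timestamp are illustrative only).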
3583
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3584
        logger.Error("Can't rename old LVs on node %s" % tgt_node)
3585
        do_change_old = False
3586
      else:
3587
        do_change_old = True
3588
      # now we rename the new LVs to the old LVs
3589
      logger.Info("renaming the new LVs on the target node")
3590
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
3591
      if not rpc.call_blockdev_rename(tgt_node, rlist):
3592
        logger.Error("Can't rename new LVs on node %s" % tgt_node)
3593
      else:
3594
        for old, new in zip(old_lvs, new_lvs):
3595
          new.logical_id = old.logical_id
3596
          cfg.SetDiskID(new, tgt_node)
3597

    
3598
      if do_change_old:
3599
        for disk in old_lvs:
3600
          disk.logical_id = ren_fn(disk, temp_suffix)
3601
          cfg.SetDiskID(disk, tgt_node)
3602

    
3603
      # now that the new lvs have the old name, we can add them to the device
3604
      logger.Info("adding new mirror component on %s" % tgt_node)
3605
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
3606
        logger.Error("Can't add local storage to drbd!")
3607
        for new_lv in new_lvs:
3608
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
3609
            logger.Error("Can't rollback device %s")
3610
        return
3611

    
3612
      dev.children = new_lvs
3613
      cfg.Update(instance)
3614

    
3615

    
3616
    # this can fail as the old devices are degraded and _WaitForSync
3617
    # does a combined result over all disks, so we don't check its
3618
    # return value
3619
    logger.Info("Done changing drbd configs, waiting for sync")
3620
    _WaitForSync(cfg, instance, unlock=True)
3621

    
3622
    # so check manually all the devices
3623
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3624
      cfg.SetDiskID(dev, instance.primary_node)
3625
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
3626
      if is_degr:
3627
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3628

    
3629
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
3630
      logger.Info("remove logical volumes for %s" % name)
3631
      for lv in old_lvs:
3632
        cfg.SetDiskID(lv, tgt_node)
3633
        if not rpc.call_blockdev_remove(tgt_node, lv):
3634
          logger.Error("Can't cleanup child device, skipping. You need to"
3635
                       " fix manually!")
3636
          continue
3637

    
3638
  def _ExecD8Secondary(self, feedback_fn):
3639
    """Replace the secondary node for drbd8.
3640

3641
    The algorithm for replace is quite complicated:
3642
      - for all disks of the instance:
3643
        - create new LVs on the new node with same names
3644
        - shutdown the drbd device on the old secondary
3645
        - disconnect the drbd network on the primary
3646
        - create the drbd device on the new secondary
3647
        - network attach the drbd on the primary, using an artifice:
3648
          the drbd code for Attach() will connect to the network if it
3649
          finds a device which is connected to the good local disks but
3650
          not network enabled
3651
      - wait for sync across all devices
3652
      - remove all disks from the old secondary
3653

3654
    Failures are not very well handled.
3655
    """
3656
    instance = self.instance
3657
    iv_names = {}
3658
    vgname = self.cfg.GetVGName()
3659
    # start of work
3660
    cfg = self.cfg
3661
    old_node = self.tgt_node
3662
    new_node = self.new_node
3663
    pri_node = instance.primary_node
3664
    for dev in instance.disks:
3665
      size = dev.size
3666
      logger.Info("adding new local storage on %s for %s" %
3667
                  (new_node, dev.iv_name))
3668
      # since we *always* want to create this LV, we use the
3669
      # _Create...OnPrimary (which forces the creation), even if we
3670
      # are talking about the secondary node
3671
      for new_lv in dev.children:
3672
        if not _CreateBlockDevOnPrimary(cfg, new_node, new_lv,
3673
                                        _GetInstanceInfoText(instance)):
3674
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3675
                                   " node '%s'" %
3676
                                   (new_lv.logical_id[1], new_node))
3677

    
3678
      # create new devices on new_node
3679
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
3680
                              logical_id=(pri_node, new_node,
3681
                                          dev.logical_id[2]),
3682
                              children=dev.children)
3683
      if not _CreateBlockDevOnSecondary(cfg, new_node, new_drbd, False,
3684
                                      _GetInstanceInfoText(instance)):
3685
        raise errors.OpExecError("Failed to create new DRBD on"
3686
                                 " node '%s'" % new_node)
3687

    
3688
      # we have new devices, shutdown the drbd on the old secondary
3689
      cfg.SetDiskID(dev, old_node)
3690
      if not rpc.call_blockdev_shutdown(old_node, dev):
3691
        raise errors.OpExecError("Failed to shutdown DRBD on old node")
3692

    
3693
      # we have new storage, we 'rename' the network on the primary
3694
      cfg.SetDiskID(dev, pri_node)
3695
      # rename to the ip of the new node
3696
      new_uid = list(dev.physical_id)
3697
      new_uid[2] = self.remote_node_info.secondary_ip
3698
      rlist = [(dev, tuple(new_uid))]
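      # physical_id of a drbd8 device is assumed to carry the secondary's
      # address at index 2; pointing it at the new node's IP and "renaming"
      # the device is what makes the primary re-attach to the new peer (see
      # the note about Attach() in the docstring above).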
3699
      if not rpc.call_blockdev_rename(pri_node, rlist):
3700
        raise errors.OpExecError("Can't detach re-attach drbd %s on node"
3701
                                 " %s from %s to %s" %
3702
                                 (dev.iv_name, pri_node, old_node, new_node))
3703
      dev.logical_id = (pri_node, new_node, dev.logical_id[2])
3704
      cfg.SetDiskID(dev, pri_node)
3705
      cfg.Update(instance)
3706

    
3707
      iv_names[dev.iv_name] = (dev, dev.children)
3708

    
3709
    # this can fail as the old devices are degraded and _WaitForSync
3710
    # does a combined result over all disks, so we don't check its
3711
    # return value
3712
    logger.Info("Done changing drbd configs, waiting for sync")
3713
    _WaitForSync(cfg, instance, unlock=True)
3714

    
3715
    # so check manually all the devices
3716
    for name, (dev, old_lvs) in iv_names.iteritems():
3717
      cfg.SetDiskID(dev, pri_node)
3718
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
3719
      if is_degr:
3720
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3721

    
3722
    for name, (dev, old_lvs) in iv_names.iteritems():
3723
      logger.Info("remove logical volumes for %s" % name)
3724
      for lv in old_lvs:
3725
        cfg.SetDiskID(lv, old_node)
3726
        if not rpc.call_blockdev_remove(old_node, lv):
3727
          logger.Error("Can't cleanup child device, skipping. You need to"
3728
                       " fix manually!")
3729
          continue
3730

    
3731
  def Exec(self, feedback_fn):
3732
    """Execute disk replacement.
3733

3734
    This dispatches the disk replacement to the appropriate handler.
3735

3736
    """
3737
    instance = self.instance
3738
    if instance.disk_template == constants.DT_REMOTE_RAID1:
3739
      fn = self._ExecRR1
3740
    elif instance.disk_template == constants.DT_DRBD8:
3741
      if self.op.remote_node is None:
3742
        fn = self._ExecD8DiskOnly
3743
      else:
3744
        fn = self._ExecD8Secondary
3745
    else:
3746
      raise errors.ProgrammerError("Unhandled disk replacement case")
3747
    return fn(feedback_fn)
3748

    
3749

    
3750
class LUQueryInstanceData(NoHooksLU):
3751
  """Query runtime instance data.
3752

3753
  """
3754
  _OP_REQP = ["instances"]
3755

    
3756
  def CheckPrereq(self):
3757
    """Check prerequisites.
3758

3759
    This only checks the optional instance list against the existing names.
3760

3761
    """
3762
    if not isinstance(self.op.instances, list):
3763
      raise errors.OpPrereqError("Invalid argument type 'instances'")
3764
    if self.op.instances:
3765
      self.wanted_instances = []
3766
      names = self.op.instances
3767
      for name in names:
3768
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
3769
        if instance is None:
3770
          raise errors.OpPrereqError("No such instance name '%s'" % name)
3771
        self.wanted_instances.append(instance)
3772
    else:
3773
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
3774
                               in self.cfg.GetInstanceList()]
3775
    return
3776

    
3777

    
3778
  def _ComputeDiskStatus(self, instance, snode, dev):
3779
    """Compute block device status.
3780

3781
    """
3782
    self.cfg.SetDiskID(dev, instance.primary_node)
3783
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
3784
    if dev.dev_type in constants.LDS_DRBD:
3785
      # we change the snode then (otherwise we use the one passed in)
3786
      if dev.logical_id[0] == instance.primary_node:
3787
        snode = dev.logical_id[1]
3788
      else:
3789
        snode = dev.logical_id[0]
3790

    
3791
    if snode:
3792
      self.cfg.SetDiskID(dev, snode)
3793
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
3794
    else:
3795
      dev_sstatus = None
3796

    
3797
    if dev.children:
3798
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
3799
                      for child in dev.children]
3800
    else:
3801
      dev_children = []
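    # pstatus/sstatus below are whatever blockdev_find returned for the
    # device on the primary and (if any) secondary node; elsewhere in this
    # module that result is treated as a tuple whose sixth field is the
    # "degraded" flag.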
3802

    
3803
    data = {
3804
      "iv_name": dev.iv_name,
3805
      "dev_type": dev.dev_type,
3806
      "logical_id": dev.logical_id,
3807
      "physical_id": dev.physical_id,
3808
      "pstatus": dev_pstatus,
3809
      "sstatus": dev_sstatus,
3810
      "children": dev_children,
3811
      }
3812

    
3813
    return data
3814

    
3815
  def Exec(self, feedback_fn):
3816
    """Gather and return data"""
3817
    result = {}
3818
    for instance in self.wanted_instances:
3819
      remote_info = rpc.call_instance_info(instance.primary_node,
3820
                                                instance.name)
3821
      if remote_info and "state" in remote_info:
3822
        remote_state = "up"
3823
      else:
3824
        remote_state = "down"
3825
      if instance.status == "down":
3826
        config_state = "down"
3827
      else:
3828
        config_state = "up"
3829

    
3830
      disks = [self._ComputeDiskStatus(instance, None, device)
3831
               for device in instance.disks]
3832

    
3833
      idict = {
3834
        "name": instance.name,
3835
        "config_state": config_state,
3836
        "run_state": remote_state,
3837
        "pnode": instance.primary_node,
3838
        "snodes": instance.secondary_nodes,
3839
        "os": instance.os,
3840
        "memory": instance.memory,
3841
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
3842
        "disks": disks,
3843
        "vcpus": instance.vcpus,
3844
        }
3845

    
3846
      result[instance.name] = idict
3847

    
3848
    return result
3849

    
3850

    
3851
class LUSetInstanceParms(LogicalUnit):
3852
  """Modifies an instances's parameters.
3853

3854
  """
3855
  HPATH = "instance-modify"
3856
  HTYPE = constants.HTYPE_INSTANCE
3857
  _OP_REQP = ["instance_name"]
3858

    
3859
  def BuildHooksEnv(self):
3860
    """Build hooks env.
3861

3862
    This runs on the master, primary and secondaries.
3863

3864
    """
3865
    args = dict()
3866
    if self.mem:
3867
      args['memory'] = self.mem
3868
    if self.vcpus:
3869
      args['vcpus'] = self.vcpus
3870
    if self.do_ip or self.do_bridge:
3871
      if self.do_ip:
3872
        ip = self.ip
3873
      else:
3874
        ip = self.instance.nics[0].ip
3875
      if self.bridge:
3876
        bridge = self.bridge
3877
      else:
3878
        bridge = self.instance.nics[0].bridge
3879
      args['nics'] = [(ip, bridge)]
3880
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
3881
    nl = [self.sstore.GetMasterNode(),
3882
          self.instance.primary_node] + list(self.instance.secondary_nodes)
3883
    return env, nl, nl
3884

    
3885
  def CheckPrereq(self):
3886
    """Check prerequisites.
3887

3888
    This only checks the instance list against the existing names.
3889

3890
    """
3891
    self.mem = getattr(self.op, "mem", None)
3892
    self.vcpus = getattr(self.op, "vcpus", None)
3893
    self.ip = getattr(self.op, "ip", None)
3894
    self.bridge = getattr(self.op, "bridge", None)
3895
    if [self.mem, self.vcpus, self.ip, self.bridge].count(None) == 4:
3896
      raise errors.OpPrereqError("No changes submitted")
3897
    if self.mem is not None:
3898
      try:
3899
        self.mem = int(self.mem)
3900
      except ValueError, err:
3901
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
3902
    if self.vcpus is not None:
3903
      try:
3904
        self.vcpus = int(self.vcpus)
3905
      except ValueError, err:
3906
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
3907
    if self.ip is not None:
3908
      self.do_ip = True
3909
      if self.ip.lower() == "none":
3910
        self.ip = None
3911
      else:
3912
        if not utils.IsValidIP(self.ip):
3913
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
3914
    else:
3915
      self.do_ip = False
3916
    self.do_bridge = (self.bridge is not None)
3917

    
3918
    instance = self.cfg.GetInstanceInfo(
3919
      self.cfg.ExpandInstanceName(self.op.instance_name))
3920
    if instance is None:
3921
      raise errors.OpPrereqError("No such instance name '%s'" %
3922
                                 self.op.instance_name)
3923
    self.op.instance_name = instance.name
3924
    self.instance = instance
3925
    return
3926

    
3927
  def Exec(self, feedback_fn):
3928
    """Modifies an instance.
3929

3930
    All parameters take effect only at the next restart of the instance.
3931
    """
3932
    result = []
3933
    instance = self.instance
3934
    if self.mem:
3935
      instance.memory = self.mem
3936
      result.append(("mem", self.mem))
3937
    if self.vcpus:
3938
      instance.vcpus = self.vcpus
3939
      result.append(("vcpus",  self.vcpus))
3940
    if self.do_ip:
3941
      instance.nics[0].ip = self.ip
3942
      result.append(("ip", self.ip))
3943
    if self.bridge:
3944
      instance.nics[0].bridge = self.bridge
3945
      result.append(("bridge", self.bridge))
3946

    
3947
    self.cfg.AddInstance(instance)
3948

    
3949
    return result
3950

    
3951

    
3952
class LUQueryExports(NoHooksLU):
3953
  """Query the exports list
3954

3955
  """
3956
  _OP_REQP = []
3957

    
3958
  def CheckPrereq(self):
3959
    """Check that the nodelist contains only existing nodes.
3960

3961
    """
3962
    self.nodes = _GetWantedNodes(self, getattr(self.op, "nodes", None))
3963

    
3964
  def Exec(self, feedback_fn):
3965
    """Compute the list of all the exported system images.
3966

3967
    Returns:
3968
      a dictionary with the structure node->(export-list)
3969
      where export-list is a list of the instances exported on
3970
      that node.
3971

3972
    """
3973
    return rpc.call_export_list(self.nodes)
3974

    
3975

    
3976
class LUExportInstance(LogicalUnit):
3977
  """Export an instance to an image in the cluster.
3978

3979
  """
3980
  HPATH = "instance-export"
3981
  HTYPE = constants.HTYPE_INSTANCE
3982
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
3983

    
3984
  def BuildHooksEnv(self):
3985
    """Build hooks env.
3986

3987
    This will run on the master, primary node and target node.
3988

3989
    """
3990
    env = {
3991
      "EXPORT_NODE": self.op.target_node,
3992
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
3993
      }
3994
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3995
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
3996
          self.op.target_node]
3997
    return env, nl, nl
3998

    
3999
  def CheckPrereq(self):
4000
    """Check prerequisites.
4001

4002
    This checks that the instance name is a valid one.
4003

4004
    """
4005
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
4006
    self.instance = self.cfg.GetInstanceInfo(instance_name)
4007
    if self.instance is None:
4008
      raise errors.OpPrereqError("Instance '%s' not found" %
4009
                                 self.op.instance_name)
4010

    
4011
    # node verification
4012
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
4013
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)
4014

    
4015
    if self.dst_node is None:
4016
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
4017
                                 self.op.target_node)
4018
    self.op.target_node = self.dst_node.name
4019

    
4020
  def Exec(self, feedback_fn):
4021
    """Export an instance to an image in the cluster.
4022

4023
    """
4024
    instance = self.instance
4025
    dst_node = self.dst_node
4026
    src_node = instance.primary_node
4027
    # shutdown the instance, unless requested not to do so
4028
    if self.op.shutdown:
4029
      op = opcodes.OpShutdownInstance(instance_name=instance.name)
4030
      self.processor.ChainOpCode(op)
4031

    
4032
    vgname = self.cfg.GetVGName()
4033

    
4034
    snap_disks = []
4035

    
4036
    try:
4037
      for disk in instance.disks:
4038
        if disk.iv_name == "sda":
4039
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
4040
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)
4041

    
4042
          if not new_dev_name:
4043
            logger.Error("could not snapshot block device %s on node %s" %
4044
                         (disk.logical_id[1], src_node))
4045
          else:
4046
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
4047
                                      logical_id=(vgname, new_dev_name),
4048
                                      physical_id=(vgname, new_dev_name),
4049
                                      iv_name=disk.iv_name)
4050
            snap_disks.append(new_dev)
4051

    
4052
    finally:
4053
      if self.op.shutdown:
4054
        op = opcodes.OpStartupInstance(instance_name=instance.name,
4055
                                       force=False)
4056
        self.processor.ChainOpCode(op)
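    # At this point every "sda" disk that could be snapshotted has a
    # corresponding LV-snapshot Disk object in snap_disks, and the instance
    # has been started again if it was shut down for the export.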
4057

    
4058
    # TODO: check for size
4059

    
4060
    for dev in snap_disks:
4061
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
4062
                                           instance):
4063
        logger.Error("could not export block device %s from node"
4064
                     " %s to node %s" %
4065
                     (dev.logical_id[1], src_node, dst_node.name))
4066
      if not rpc.call_blockdev_remove(src_node, dev):
4067
        logger.Error("could not remove snapshot block device %s from"
4068
                     " node %s" % (dev.logical_id[1], src_node))
4069

    
4070
    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
4071
      logger.Error("could not finalize export for instance %s on node %s" %
4072
                   (instance.name, dst_node.name))
4073

    
4074
    nodelist = self.cfg.GetNodeList()
4075
    nodelist.remove(dst_node.name)
4076

    
4077
    # on one-node clusters nodelist will be empty after the removal
4078
    # if we proceed the backup would be removed because OpQueryExports
4079
    # substitutes an empty list with the full cluster node list.
4080
    if nodelist:
4081
      op = opcodes.OpQueryExports(nodes=nodelist)
4082
      exportlist = self.processor.ChainOpCode(op)
4083
      for node in exportlist:
4084
        if instance.name in exportlist[node]:
4085
          if not rpc.call_export_remove(node, instance.name):
4086
            logger.Error("could not remove older export for instance %s"
4087
                         " on node %s" % (instance.name, node))
4088

    
4089

    
4090
class TagsLU(NoHooksLU):
4091
  """Generic tags LU.
4092

4093
  This is an abstract class which is the parent of all the other tags LUs.
4094

4095
  """
4096
  def CheckPrereq(self):
4097
    """Check prerequisites.
4098

4099
    """
4100
    if self.op.kind == constants.TAG_CLUSTER:
4101
      self.target = self.cfg.GetClusterInfo()
4102
    elif self.op.kind == constants.TAG_NODE:
4103
      name = self.cfg.ExpandNodeName(self.op.name)
4104
      if name is None:
4105
        raise errors.OpPrereqError("Invalid node name (%s)" %
4106
                                   (self.op.name,))
4107
      self.op.name = name
4108
      self.target = self.cfg.GetNodeInfo(name)
4109
    elif self.op.kind == constants.TAG_INSTANCE:
4110
      name = self.cfg.ExpandInstanceName(self.op.name)
4111
      if name is None:
4112
        raise errors.OpPrereqError("Invalid instance name (%s)" %
4113
                                   (self.op.name,))
4114
      self.op.name = name
4115
      self.target = self.cfg.GetInstanceInfo(name)
4116
    else:
4117
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
4118
                                 str(self.op.kind))
4119

    
4120

    
4121
class LUGetTags(TagsLU):
4122
  """Returns the tags of a given object.
4123

4124
  """
4125
  _OP_REQP = ["kind", "name"]
4126

    
4127
  def Exec(self, feedback_fn):
4128
    """Returns the tag list.
4129

4130
    """
4131
    return self.target.GetTags()
4132

    
4133

    
4134
class LUAddTags(TagsLU):
4135
  """Sets a tag on a given object.
4136

4137
  """
4138
  _OP_REQP = ["kind", "name", "tags"]
4139

    
4140
  def CheckPrereq(self):
4141
    """Check prerequisites.
4142

4143
    This checks the type and length of the tag name and value.
4144

4145
    """
4146
    TagsLU.CheckPrereq(self)
4147
    for tag in self.op.tags:
4148
      objects.TaggableObject.ValidateTag(tag)
4149

    
4150
  def Exec(self, feedback_fn):
4151
    """Sets the tag.
4152

4153
    """
4154
    try:
4155
      for tag in self.op.tags:
4156
        self.target.AddTag(tag)
4157
    except errors.TagError, err:
4158
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4159
    try:
4160
      self.cfg.Update(self.target)
4161
    except errors.ConfigurationError:
4162
      raise errors.OpRetryError("There has been a modification to the"
4163
                                " config file and the operation has been"
4164
                                " aborted. Please retry.")
4165

    
4166

    
4167
class LUDelTags(TagsLU):
4168
  """Delete a list of tags from a given object.
4169

4170
  """
4171
  _OP_REQP = ["kind", "name", "tags"]
4172

    
4173
  def CheckPrereq(self):
4174
    """Check prerequisites.
4175

4176
    This checks that we have the given tag.
4177

4178
    """
4179
    TagsLU.CheckPrereq(self)
4180
    for tag in self.op.tags:
4181
      objects.TaggableObject.ValidateTag(tag)
4182
    del_tags = frozenset(self.op.tags)
4183
    cur_tags = self.target.GetTags()
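    # del_tags <= cur_tags is a frozenset subset test: it only holds if every
    # tag we were asked to delete is currently present on the target object
    # (GetTags is expected to return a set-like collection).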
4184
    if not del_tags <= cur_tags:
4185
      diff_tags = del_tags - cur_tags
4186
      diff_names = ["'%s'" % tag for tag in diff_tags]
4187
      diff_names.sort()
4188
      raise errors.OpPrereqError("Tag(s) %s not found" %
4189
                                 (",".join(diff_names)))
4190

    
4191
  def Exec(self, feedback_fn):
4192
    """Remove the tag from the object.
4193

4194
    """
4195
    for tag in self.op.tags:
4196
      self.target.RemoveTag(tag)
4197
    try:
4198
      self.cfg.Update(self.target)
4199
    except errors.ConfigurationError:
4200
      raise errors.OpRetryError("There has been a modification to the"
4201
                                " config file and the operation has been"
4202
                                " aborted. Please retry.")